[VOL-5486] Fix deprecated versions

Change-Id: Ic398322948171d8a1c5b4e866602805ec0ea396f
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/github.com/IBM/sarama/.gitignore b/vendor/github.com/IBM/sarama/.gitignore
new file mode 100644
index 0000000..f887957
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.gitignore
@@ -0,0 +1,31 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/bin
+/coverage.txt
+/profile.out
+/output.json
+
+.idea
diff --git a/vendor/github.com/IBM/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml
new file mode 100644
index 0000000..520e422
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.golangci.yml
@@ -0,0 +1,104 @@
+# yaml-language-server: $schema=https://golangci-lint.run/jsonschema/golangci.jsonschema.json
+version: "2"
+linters:
+  default: none
+  enable:
+  - bodyclose
+  - copyloopvar
+  - depguard
+  - dogsled
+  - errcheck
+  - errorlint
+  - funlen
+  - gochecknoinits
+  - gocritic
+  - gocyclo
+  - gosec
+  - govet
+  - misspell
+  - nilerr
+  - unconvert
+  - unused
+  - whitespace
+  settings:
+    depguard:
+      rules:
+        main:
+          deny:
+          - pkg: io/ioutil
+            desc: Use the "io" and "os" packages instead.
+    dupl:
+      threshold: 100
+    funlen:
+      lines: 300
+      statements: 300
+    goconst:
+      min-len: 2
+      min-occurrences: 3
+    gocritic:
+      enabled-checks:
+      - importShadow
+      - nestingReduce
+      - stringsCompare
+      # - unnamedResult
+      # - whyNoLint
+      disabled-checks:
+      - assignOp
+      - appendAssign
+      - commentedOutCode
+      - hugeParam
+      - ifElseChain
+      - singleCaseSwitch
+      - sloppyReassign
+      enabled-tags:
+      - diagnostic
+      - performance
+      # - experimental
+      # - opinionated
+      # - style
+    gocyclo:
+      min-complexity: 99
+    govet:
+      disable:
+      - fieldalignment
+      - shadow
+      enable-all: true
+    misspell:
+      locale: US
+  # exclude some linters from running on certain files.
+  exclusions:
+    generated: lax
+    presets:
+    - comments
+    - common-false-positives
+    - legacy
+    - std-error-handling
+    rules:
+    - linters:
+      - paralleltest
+      path: functional.*_test\.go
+    - path: (.+)\.go$
+      text: 'G115: integer overflow conversion'
+    - path: (.+)\.go$
+      text: 'G404: Use of weak random number generator'
+    paths:
+    - third_party$
+    - builtin$
+    - examples$
+issues:
+  # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
+  max-same-issues: 0
+formatters:
+  enable:
+  - gofmt
+  - goimports
+  settings:
+    goimports:
+      local-prefixes:
+      - github.com/IBM/sarama
+  exclusions:
+    generated: lax
+    paths:
+    - third_party$
+    - builtin$
+    - examples$
diff --git a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml
new file mode 100644
index 0000000..5387686
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml
@@ -0,0 +1,41 @@
+fail_fast: false
+default_install_hook_types: [pre-commit, commit-msg]
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v6.0.0
+    hooks:
+      - id: check-merge-conflict
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: fix-byte-order-marker
+      - id: mixed-line-ending
+      - id: trailing-whitespace
+  - repo: local
+    hooks:
+      - id: conventional-commit-msg-validation
+        name: commit message conventional validation
+        language: pygrep
+        entry: '^(?:fixup! )?(breaking|build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test){1}(\([\w\-\.]+\))?(!)?: ([\w `])+([\s\S]*)'
+        args: [--multiline, --negate]
+        stages: [commit-msg]
+      - id: commit-msg-needs-to-be-signed-off
+        name: commit message needs to be signed off
+        language: pygrep
+        entry: "^Signed-off-by:"
+        args: [--multiline, --negate]
+        stages: [commit-msg]
+      - id: gofmt
+        name: gofmt
+        description: Format files with gofmt.
+        entry: gofmt -l
+        language: golang
+        files: \.go$
+        args: []
+  - repo: https://github.com/gitleaks/gitleaks
+    rev: v8.28.0
+    hooks:
+      - id: gitleaks
+  - repo: https://github.com/golangci/golangci-lint
+    rev: v2.4.0
+    hooks:
+      - id: golangci-lint
diff --git a/vendor/github.com/IBM/sarama/.whitesource b/vendor/github.com/IBM/sarama/.whitesource
new file mode 100644
index 0000000..26e9c47
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.whitesource
@@ -0,0 +1,3 @@
+{
+  "settingsInheritedFrom": "ibm-mend-config/mend-config@main"
+}
\ No newline at end of file
diff --git a/vendor/github.com/IBM/sarama/CHANGELOG.md b/vendor/github.com/IBM/sarama/CHANGELOG.md
new file mode 100644
index 0000000..99abeb3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CHANGELOG.md
@@ -0,0 +1,1760 @@
+# Changelog
+
+## Version 1.42.2 (2024-02-09)
+
+## What's Changed
+
+⚠️ The go.mod directive has been bumped to 1.18 as the minimum version of Go required for the module. This was necessary to continue to receive updates from some of the third party dependencies that Sarama makes use of for compression.
+
+### :tada: New Features / Improvements
+* feat: update go directive to 1.18 by @dnwe in https://github.com/IBM/sarama/pull/2713
+* feat: return KError instead of errors in AlterConfigs and DescribeConfig by @zhuliquan in https://github.com/IBM/sarama/pull/2472
+### :bug: Fixes
+* fix: don't waste time for backoff on member id required error by @lzakharov in https://github.com/IBM/sarama/pull/2759
+* fix: prevent ConsumerGroup.Close infinitely locking by @maqdev in https://github.com/IBM/sarama/pull/2717
+### :package: Dependency updates
+* chore(deps): bump golang.org/x/net from 0.17.0 to 0.18.0 by @dependabot in https://github.com/IBM/sarama/pull/2716
+* chore(deps): bump golang.org/x/sync to v0.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2718
+* chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.18 to 4.1.19 by @dependabot in https://github.com/IBM/sarama/pull/2739
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2748
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2734
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2764
+* chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.19 to 4.1.21 by @dependabot in https://github.com/IBM/sarama/pull/2763
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/exactly_once by @dependabot in https://github.com/IBM/sarama/pull/2749
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/consumergroup by @dependabot in https://github.com/IBM/sarama/pull/2750
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/sasl_scram_client by @dependabot in https://github.com/IBM/sarama/pull/2751
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2752
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/http_server by @dependabot in https://github.com/IBM/sarama/pull/2753
+* chore(deps): bump github.com/eapache/go-resiliency from 1.4.0 to 1.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2745
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/txn_producer by @dependabot in https://github.com/IBM/sarama/pull/2754
+* chore(deps): bump go.opentelemetry.io/otel/sdk from 1.19.0 to 1.22.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2767
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2793
+* chore(deps): bump go.opentelemetry.io/otel/exporters/stdout/stdoutmetric from 0.42.0 to 1.23.1 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2792
+### :wrench: Maintenance
+* fix(examples): housekeeping of code and deps by @dnwe in https://github.com/IBM/sarama/pull/2720
+### :heavy_plus_sign: Other Changes
+* fix(test): retry MockBroker Listen for EADDRINUSE by @dnwe in https://github.com/IBM/sarama/pull/2721
+
+## New Contributors
+* @maqdev made their first contribution in https://github.com/IBM/sarama/pull/2717
+* @zhuliquan made their first contribution in https://github.com/IBM/sarama/pull/2472
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.1...v1.42.2
+
+## Version 1.42.1 (2023-11-07)
+
+## What's Changed
+### :bug: Fixes
+* fix: make fetchInitialOffset use correct protocol by @dnwe in https://github.com/IBM/sarama/pull/2705
+* fix(config): relax ClientID validation after 1.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2706
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.0...v1.42.1
+
+## Version 1.42.0 (2023-11-02)
+
+## What's Changed
+### :bug: Fixes
+* Asynchronously close brokers during a RefreshBrokers by @bmassemin in https://github.com/IBM/sarama/pull/2693
+* Fix data race on Broker.done channel by @prestona in https://github.com/IBM/sarama/pull/2698
+* fix: data race in Broker.AsyncProduce by @lzakharov in https://github.com/IBM/sarama/pull/2678
+* Fix default retention time value in offset commit by @prestona in https://github.com/IBM/sarama/pull/2700
+* fix(txmgr): ErrOffsetsLoadInProgress is retriable by @dnwe in https://github.com/IBM/sarama/pull/2701
+### :wrench: Maintenance
+* chore(ci): improve ossf scorecard result by @dnwe in https://github.com/IBM/sarama/pull/2685
+* chore(ci): add kafka 3.6.0 to FVT and versions by @dnwe in https://github.com/IBM/sarama/pull/2692
+### :heavy_plus_sign: Other Changes
+* chore(ci): ossf scorecard.yml by @dnwe in https://github.com/IBM/sarama/pull/2683
+* fix(ci): always run CodeQL on every commit by @dnwe in https://github.com/IBM/sarama/pull/2689
+* chore(doc): add OpenSSF Scorecard badge by @dnwe in https://github.com/IBM/sarama/pull/2691
+
+## New Contributors
+* @bmassemin made their first contribution in https://github.com/IBM/sarama/pull/2693
+* @lzakharov made their first contribution in https://github.com/IBM/sarama/pull/2678
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.3...v1.42.0
+
+## Version 1.41.3 (2023-10-17)
+
+## What's Changed
+### :bug: Fixes
+* fix: pre-compile regex for parsing kafka version by @qshuai in https://github.com/IBM/sarama/pull/2663
+* fix(client): ignore empty Metadata responses when refreshing by @HaoSunUber in https://github.com/IBM/sarama/pull/2672
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2661
+* chore(deps): bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2671
+### :memo: Documentation
+* fix(docs): correct topic name in rebalancing strategy example by @maksadbek in https://github.com/IBM/sarama/pull/2657
+
+## New Contributors
+* @maksadbek made their first contribution in https://github.com/IBM/sarama/pull/2657
+* @qshuai made their first contribution in https://github.com/IBM/sarama/pull/2663
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.2...v1.41.3
+
+## Version 1.41.2 (2023-09-12)
+
+## What's Changed
+### :tada: New Features / Improvements
+* perf: Alloc records in batch by @ronanh in https://github.com/IBM/sarama/pull/2646
+### :bug: Fixes
+* fix(consumer): guard against nil client by @dnwe in https://github.com/IBM/sarama/pull/2636
+* fix(consumer): don't retry session if ctx canceled by @dnwe in https://github.com/IBM/sarama/pull/2642
+* fix: use least loaded broker to refresh metadata by @HaoSunUber in https://github.com/IBM/sarama/pull/2645
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2641
+
+## New Contributors
+* @HaoSunUber made their first contribution in https://github.com/IBM/sarama/pull/2645
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.1...v1.41.2
+
+## Version 1.41.1 (2023-08-30)
+
+## What's Changed
+### :bug: Fixes
+* fix(proto): handle V3 member metadata and empty owned partitions by @dnwe in https://github.com/IBM/sarama/pull/2618
+* fix: make clear that error is configuration issue not server error by @hindessm in https://github.com/IBM/sarama/pull/2628
+* fix(client): force Event Hubs to use V1_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2633
+* fix: add retries to alter user scram creds by @hindessm in https://github.com/IBM/sarama/pull/2632
+### :wrench: Maintenance
+* chore(lint): bump golangci-lint and tweak config by @dnwe in https://github.com/IBM/sarama/pull/2620
+### :memo: Documentation
+* fix(doc): add missing doc for mock consumer by @hsweif in https://github.com/IBM/sarama/pull/2386
+* chore(proto): doc CreateTopics/JoinGroup fields by @dnwe in https://github.com/IBM/sarama/pull/2627
+### :heavy_plus_sign: Other Changes
+* chore(gh): add new style issue templates by @dnwe in https://github.com/IBM/sarama/pull/2624
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.0...v1.41.1
+
+## Version 1.41.0 (2023-08-21)
+
+## What's Changed
+### :rotating_light: Breaking Changes
+
+Note: this version of Sarama has had a big overhaul in its adherence to the correct Kafka protocol versions for the given Config Version. It also bumps the default Version set in Config (where one is not supplied) to 2.1.0, in preparation for Kafka 4.0 dropping support for protocol versions older than 2.1. If you are using Sarama against Kafka clusters older than v2.1.0, or against Azure EventHubs, you will likely have to change your application code to pin to the appropriate Version (see the sketch after the PRs below).
+
+* chore(config): make DefaultVersion V2_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2572
+* chore(config): make DefaultVersion V2_1_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2574
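+
+A minimal sketch (editorial, not part of the upstream release notes) of pinning `Config.Version` explicitly when talking to an older cluster; the broker address and the pinned version value are placeholders:
+
+```go
+package main
+
+import "github.com/IBM/sarama"
+
+func main() {
+	cfg := sarama.NewConfig()
+	// Pin to the cluster's protocol version instead of relying on the new 2.1.0 default.
+	cfg.Version = sarama.V1_0_0_0
+	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
+	if err != nil {
+		panic(err)
+	}
+	defer client.Close()
+}
+```
+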
+### :tada: New Features / Improvements
+* Implement resolve_canonical_bootstrap_servers_only by @gebn in https://github.com/IBM/sarama/pull/2156
+* feat: sleep when throttled (KIP-219) by @hindessm in https://github.com/IBM/sarama/pull/2536
+* feat: add isValidVersion to protocol types by @dnwe in https://github.com/IBM/sarama/pull/2538
+* fix(consumer): use newer LeaveGroup as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2544
+* Add support for up to version 4 List Groups API by @prestona in https://github.com/IBM/sarama/pull/2541
+* fix(producer): use newer ProduceReq as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2546
+* fix(proto): ensure req+resp requiredVersion match by @dnwe in https://github.com/IBM/sarama/pull/2548
+* chore(proto): permit CreatePartitionsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2549
+* chore(proto): permit AlterConfigsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2550
+* chore(proto): permit DeleteGroupsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2551
+* fix(proto): correct JoinGroup usage for wider version range by @dnwe in https://github.com/IBM/sarama/pull/2553
+* fix(consumer): use full range of FetchRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2554
+* fix(proto): use range of OffsetCommitRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2555
+* fix(proto): use full range of MetadataRequest by @dnwe in https://github.com/IBM/sarama/pull/2556
+* fix(proto): use fuller ranges of supported proto by @dnwe in https://github.com/IBM/sarama/pull/2558
+* fix(proto): use full range of SyncGroupRequest by @dnwe in https://github.com/IBM/sarama/pull/2565
+* fix(proto): use full range of ListGroupsRequest by @dnwe in https://github.com/IBM/sarama/pull/2568
+* feat(proto): support for Metadata V6-V10 by @dnwe in https://github.com/IBM/sarama/pull/2566
+* fix(proto): use full ranges for remaining proto by @dnwe in https://github.com/IBM/sarama/pull/2570
+* feat(proto): add remaining protocol for V2.1 by @dnwe in https://github.com/IBM/sarama/pull/2573
+* feat: add new error for MockDeleteTopicsResponse by @javiercri in https://github.com/IBM/sarama/pull/2475
+* feat(gzip): switch to klauspost/compress gzip by @dnwe in https://github.com/IBM/sarama/pull/2600
+### :bug: Fixes
+* fix: correct unsupported version check by @hindessm in https://github.com/IBM/sarama/pull/2528
+* fix: avoiding burning cpu if all partitions are paused by @napallday in https://github.com/IBM/sarama/pull/2532
+* extend throttling metric scope by @hindessm in https://github.com/IBM/sarama/pull/2533
+* Fix printing of final metrics by @prestona in https://github.com/IBM/sarama/pull/2545
+* fix(consumer): cannot automatically fetch newly-added partitions unless restart by @napallday in https://github.com/IBM/sarama/pull/2563
+* bug: implement unsigned modulus for partitioning with crc32 hashing by @csm8118 in https://github.com/IBM/sarama/pull/2560
+* fix: avoid logging value of proxy.Dialer by @prestona in https://github.com/IBM/sarama/pull/2569
+* fix(test): add missing closes to admin client tests by @dnwe in https://github.com/IBM/sarama/pull/2594
+* fix(test): ensure some more clients are closed by @dnwe in https://github.com/IBM/sarama/pull/2595
+* fix(examples): sync exactly_once and consumergroup by @dnwe in https://github.com/IBM/sarama/pull/2614
+* fix(fvt): fresh metrics registry for each test by @dnwe in https://github.com/IBM/sarama/pull/2616
+* fix(test): flaky test TestFuncOffsetManager by @napallday in https://github.com/IBM/sarama/pull/2609
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2542
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2561
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.18 by @dnwe in https://github.com/IBM/sarama/pull/2589
+* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.4 by @dnwe in https://github.com/IBM/sarama/pull/2587
+* chore(deps): bump github.com/eapache/go-xerial-snappy digest to c322873 by @dnwe in https://github.com/IBM/sarama/pull/2586
+* chore(deps): bump module github.com/klauspost/compress to v1.16.7 by @dnwe in https://github.com/IBM/sarama/pull/2588
+* chore(deps): bump github.com/eapache/go-resiliency from 1.3.0 to 1.4.0 by @dependabot in https://github.com/IBM/sarama/pull/2598
+### :wrench: Maintenance
+* fix(fvt): ensure fully-replicated at test start by @hindessm in https://github.com/IBM/sarama/pull/2531
+* chore: rollup fvt kafka to latest three by @dnwe in https://github.com/IBM/sarama/pull/2537
+* Merge the two CONTRIBUTING.md's by @prestona in https://github.com/IBM/sarama/pull/2543
+* fix(test): test timing error by @hindessm in https://github.com/IBM/sarama/pull/2552
+* chore(ci): tidyup and improve actions workflows by @dnwe in https://github.com/IBM/sarama/pull/2557
+* fix(test): shutdown MockBroker by @dnwe in https://github.com/IBM/sarama/pull/2571
+* chore(proto): match HeartbeatResponse version by @dnwe in https://github.com/IBM/sarama/pull/2576
+* chore(test): ensure MockBroker closed within test by @dnwe in https://github.com/IBM/sarama/pull/2575
+* chore(test): ensure all mockresponses use version by @dnwe in https://github.com/IBM/sarama/pull/2578
+* chore(ci): use latest Go in actions by @dnwe in https://github.com/IBM/sarama/pull/2580
+* chore(test): speedup some slow tests by @dnwe in https://github.com/IBM/sarama/pull/2579
+* chore(test): use modern protocol versions in FVT by @dnwe in https://github.com/IBM/sarama/pull/2581
+* chore(test): fix a couple of leaks by @dnwe in https://github.com/IBM/sarama/pull/2591
+* feat(fvt): experiment with per-kafka-version image by @dnwe in https://github.com/IBM/sarama/pull/2592
+* chore(ci): replace toxiproxy client dep by @dnwe in https://github.com/IBM/sarama/pull/2593
+* feat(fvt): add healthcheck, depends_on and --wait by @dnwe in https://github.com/IBM/sarama/pull/2601
+* fix(fvt): handle msgset vs batchset by @dnwe in https://github.com/IBM/sarama/pull/2603
+* fix(fvt): Metadata version in ensureFullyReplicated by @dnwe in https://github.com/IBM/sarama/pull/2612
+* fix(fvt): versioned cfg for invalid topic producer by @dnwe in https://github.com/IBM/sarama/pull/2613
+* chore(fvt): tweak to work across more versions by @dnwe in https://github.com/IBM/sarama/pull/2615
+* feat(fvt): test wider range of kafkas by @dnwe in https://github.com/IBM/sarama/pull/2605
+### :memo: Documentation
+* fix(example): check if msg channel is closed by @ioanzicu in https://github.com/IBM/sarama/pull/2479
+* chore: use go install for installing sarama tools by @vigith in https://github.com/IBM/sarama/pull/2599
+
+## New Contributors
+* @gebn made their first contribution in https://github.com/IBM/sarama/pull/2156
+* @prestona made their first contribution in https://github.com/IBM/sarama/pull/2543
+* @ioanzicu made their first contribution in https://github.com/IBM/sarama/pull/2479
+* @csm8118 made their first contribution in https://github.com/IBM/sarama/pull/2560
+* @javiercri made their first contribution in https://github.com/IBM/sarama/pull/2475
+* @vigith made their first contribution in https://github.com/IBM/sarama/pull/2599
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.1...v1.41.0
+
+## Version 1.40.1 (2023-07-27)
+
+## What's Changed
+### :tada: New Features / Improvements
+* Use buffer pools for decompression by @ronanh in https://github.com/IBM/sarama/pull/2484
+* feat: support for Kerberos authentication with a credentials cache. by @mrogaski in https://github.com/IBM/sarama/pull/2457
+### :bug: Fixes
+* Fix some retry issues by @hindessm in https://github.com/IBM/sarama/pull/2517
+* fix: admin retry logic by @hindessm in https://github.com/IBM/sarama/pull/2519
+* Add some retry logic to more admin client functions by @hindessm in https://github.com/IBM/sarama/pull/2520
+* fix: concurrent issue on updateMetadataMs by @napallday in https://github.com/IBM/sarama/pull/2522
+* fix(test): allow testing of skipped test without IsTransactional panic by @hindessm in https://github.com/IBM/sarama/pull/2525
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2509
+* chore(deps): bump github.com/klauspost/compress from 1.15.14 to 1.16.6 by @dependabot in https://github.com/IBM/sarama/pull/2513
+* chore(deps): bump github.com/stretchr/testify from 1.8.1 to 1.8.3 by @dependabot in https://github.com/IBM/sarama/pull/2512
+### :wrench: Maintenance
+* chore(ci): migrate probot-stale to actions/stale by @dnwe in https://github.com/IBM/sarama/pull/2496
+* chore(ci): bump golangci version, cleanup, depguard config by @EladLeev in https://github.com/IBM/sarama/pull/2504
+* Clean up some typos and docs/help mistakes by @hindessm in https://github.com/IBM/sarama/pull/2514
+### :heavy_plus_sign: Other Changes
+* chore(ci): add simple apidiff workflow by @dnwe in https://github.com/IBM/sarama/pull/2497
+* chore(ci): bump actions/setup-go from 3 to 4 by @dependabot in https://github.com/IBM/sarama/pull/2508
+* fix(comments): PauseAll and ResumeAll by @napallday in https://github.com/IBM/sarama/pull/2523
+
+## New Contributors
+* @EladLeev made their first contribution in https://github.com/IBM/sarama/pull/2504
+* @hindessm made their first contribution in https://github.com/IBM/sarama/pull/2514
+* @ronanh made their first contribution in https://github.com/IBM/sarama/pull/2484
+* @mrogaski made their first contribution in https://github.com/IBM/sarama/pull/2457
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.0...v1.40.1
+
+## Version 1.40.0 (2023-07-17)
+
+## What's Changed
+
+Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461
+
+### :rotating_light: Breaking Changes
+
+- chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492
+- fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494
+
+### :bug: Fixes
+
+- fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427
+- fix(metrics): fix race condition when calling Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428
+- fix: use version 4 of DescribeGroupsRequest only if kafka broker version is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451
+- Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447
+- fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453
+
+### :package: Dependency updates
+
+- chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452
+
+### :wrench: Maintenance
+
+- chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434
+- chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489
+- chore: bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485
+
+## New Contributors
+
+- @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452
+- @gr8web made their first contribution in https://github.com/IBM/sarama/pull/2447
+- @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0
+
+## Version 1.38.1 (2023-01-22)
+
+## What's Changed
+### :bug: Fixes
+* fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420
+* fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424
+### :package: Dependency updates
+* chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410
+* chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413
+* chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411
+* chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412
+* chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414
+* chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418
+
+## New Contributors
+* @diallo-han made their first contribution in https://github.com/IBM/sarama/pull/2420
+* @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1
+
+## Version 1.38.0 (2023-01-08)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in https://github.com/IBM/sarama/pull/2375
+* feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388
+* feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373
+### :bug: Fixes
+* fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389
+* fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385
+* fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404
+* fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387
+* fix(producer): return errors for every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378
+* fix(metrics): fix race when accessing metric registry by @vincentbernat in https://github.com/IBM/sarama/pull/2409
+### :package: Dependency updates
+* chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403
+### :wrench: Maintenance
+* chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390
+* chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406
+
+## New Contributors
+* @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385
+* @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404
+* @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387
+* @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0
+
+## Version 1.37.2 (2022-10-04)
+
+## What's Changed
+### :bug: Fixes
+* fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356
+### :heavy_plus_sign: Other Changes
+* fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2
+
+## Version 1.37.1 (2022-10-04)
+
+## What's Changed
+### :bug: Fixes
+* fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352
+* fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353
+* fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355
+
+## New Contributors
+* @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1
+
+## Version 1.37.0 (2022-09-28)
+
+## What's Changed
+
+### :rotating_light: Breaking Changes
+* Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward; unfortunately, due to an oversight this wasn't reflected in the go.mod declaration at the time of release.
+
+### :tada: New Features / Improvements
+* feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339
+* feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295
+* feat(mocks): support key in MockFetchResponse. by @Skandalik in https://github.com/IBM/sarama/pull/2328
+### :bug: Fixes
+* fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329
+* fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317
+* fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340
+* fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331
+* fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345
+* Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327
+### :package: Dependency updates
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335
+* chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333
+* chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334
+* chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348
+* chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336
+* chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350
+### :wrench: Maintenance
+* chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346
+* chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347
+
+## New Contributors
+* @Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329
+* @pkoutsovasilis made their first contribution in https://github.com/IBM/sarama/pull/2317
+* @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328
+* @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340
+* @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0
+
+## Version 1.36.0 (2022-08-11)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252
+* feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315
+* feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299
+### :bug: Fixes
+* fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143
+### :package: Dependency updates
+* chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304
+* chore(deps): bump golang.org/x/net digest to c7608f3 by @dnwe in https://github.com/IBM/sarama/pull/2301
+* chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311
+* chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307
+* chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313
+* chore(deps): bump module github.com/jcmturner/gofork to v1.7.6 by @dnwe in https://github.com/IBM/sarama/pull/2305
+* chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302
+* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303
+### :wrench: Maintenance
+* chore: add kafka 3.1.1 to the version matrix by @dnwe in https://github.com/IBM/sarama/pull/2300
+### :heavy_plus_sign: Other Changes
+* Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294
+* Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297
+* chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312
+
+## New Contributors
+* @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294
+* @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252
+* @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0
+
+## Version 1.35.0 (2022-07-22)
+
+## What's Changed
+### :bug: Fixes
+* fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256
+* fix(balance): sort and de-deplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285
+* fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269
+* fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292
+### :package: Dependency updates
+* chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284
+* chore(deps): bump module github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283
+* chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281
+* chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280
+### :wrench: Maintenance
+* chore: rename `any` func to avoid identifier by @dnwe in https://github.com/IBM/sarama/pull/2272
+* chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288
+* chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289
+### :heavy_plus_sign: Other Changes
+* chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286
+* fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287
+* fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291
+
+## New Contributors
+* @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0
+
+## Version 1.34.1 (2022-06-07)
+
+## What's Changed
+### :bug: Fixes
+* fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240
+* fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247
+* fix(protocol): tidyup DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2248
+* fix(consumer): range balance strategy not like reference by @njhartwell in https://github.com/IBM/sarama/pull/2245
+### :wrench: Maintenance
+* chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236
+* chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242
+
+## New Contributors
+* @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240
+* @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1
+
+## Version 1.34.0 (2022-05-30)
+
+## What's Changed
+### :tada: New Features / Improvements
+* KIP-345: support static membership by @aiquestion in https://github.com/IBM/sarama/pull/2230
+### :bug: Fixes
+* fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234
+### :wrench: Maintenance
+* chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231
+* chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232
+
+## New Contributors
+* @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0
+
+## Version 1.33.0 (2022-05-11)
+
+## What's Changed
+### :rotating_light: Breaking Changes
+
+**Note: with this change, users of Sarama are required to use Go 1.13's errors.Is etc. (rather than ==) when testing errors returned by this library (see the sketch below).**
+* feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131
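+
+A minimal sketch (editorial, not from the upstream notes) of checking a returned error with `errors.Is` rather than `==`; the broker address is a placeholder:
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	_, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
+	// ErrOutOfBrokers now wraps the underlying cause, so compare with errors.Is, not ==.
+	if errors.Is(err, sarama.ErrOutOfBrokers) {
+		fmt.Println("no brokers reachable:", err)
+	}
+}
+```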
+
+
+### :tada: New Features / Improvements
+* feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172
+* KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197
+* feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191
+### :bug: Fixes
+* fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154
+* feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144
+* fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109
+* fix: remove "Is your cluster reachable?" from msg by @dnwe in https://github.com/IBM/sarama/pull/2165
+* fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166
+* fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164
+* fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171
+* fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185
+* producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182
+* fix: prevent RefreshBrokers leaking old brokers  by @k-wall in https://github.com/IBM/sarama/pull/2203
+* fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204
+* fix: prevent AsyncProducer retryBatch from leaking  by @k-wall in https://github.com/IBM/sarama/pull/2208
+* fix: prevent metrics leak when authenticate fails  by @Stephan14 in https://github.com/IBM/sarama/pull/2205
+* fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194
+* fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178
+* fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213
+* fix: cope with OffsetsLoadInProgress on Join+Sync  by @dnwe in https://github.com/IBM/sarama/pull/2214
+* fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227
+### :package: Dependency updates
+* chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170
+### :wrench: Maintenance
+* fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161
+* fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162
+* chore(ci): bump along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183
+* chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210
+### :heavy_plus_sign: Other Changes
+* Remediate a number go-routine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198
+* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199
+* chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200
+* fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226
+
+## New Contributors
+* @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154
+* @pior made their first contribution in https://github.com/IBM/sarama/pull/2171
+* @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185
+* @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172
+* @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182
+* @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178
+* @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0
+
+## Version 1.32.0 (2022-02-24)
+
+### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used.
+
+* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199
+
+---
+
+## What's Changed
+### :bug: Fixes
+* Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133
+### :package: Dependency updates
+* chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159
+### :wrench: Maintenance
+* fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130
+* fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939
+* chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138
+### :heavy_plus_sign: Other Changes
+* chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145
+* chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146
+* Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147
+* chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0
+
+## Version 1.31.1 (2022-02-01)
+
+- #2126 - @bai - Populate missing kafka versions
+- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image
+- #2123 - @bai - Update klauspost/compress to 0.14
+- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy
+- #2119 - @bai - Add Kafka 3.1.0 version number
+- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption
+- #2051 - @seveas - Expose the TLS connection state of a broker connection
+- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys
+- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup
+- #2113 - @mosceo - Fix typo
+
+## Version 1.31.0 (2022-01-18)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088
+* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686
+* Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094
+### :bug: Fixes
+* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080
+* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081
+* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082
+* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096
+* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107
+* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108
+* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078
+* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111
+### :wrench: Maintenance
+* chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100
+### :memo: Documentation
+* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099
+### :heavy_plus_sign: Other Changes
+* Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084
+
+## New Contributors
+* @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080
+* @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088
+* @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686
+* @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0
+
+## Version 1.30.1 (2021-12-04)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045
+### :bug: Fixes
+* fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048
+* logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054
+* fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057
+* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076
+### :wrench: Maintenance
+* chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046
+* chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070
+
+## Notes
+* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x
+
+## New Contributors
+* @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048
+* @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045
+* @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054
+* @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1
+
+## Version 1.30.0 (2021-09-29)
+
+⚠️ This release has been superseded by v1.30.1 and should _not_ be used.
+
+**regression**: enabling rack awareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076
+
+---
+
+ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x**
+
+---
+
+# New Features / Improvements
+
+- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh
+- #2000 - @matzew - Using xdg-go module for SCRAM
+- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures
+- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM
+- #2006 - @faillefer - Add support for DeleteOffsets operation
+- #1909 - @agriffaut - KIP-546 Client quota APIs
+- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state
+- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger
+- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log
+- #2019 - @dnwe - feat: add logging & a metric for producer throttle
+- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface
+- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol
+- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open
+- #2034 - @bai - Add support for kafka 3.0.0
+
+# Fixes
+
+- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest
+- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation
+- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls
+- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true
+- #2007 - @bai - Add support for Go 1.17
+- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks
+- #2010 - @dnwe - chore: enable exportloopref and misspell linters
+- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements
+- #2015 - @bai - Change default branch to main
+- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID()
+- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0
+- #2016 - @dnwe - chore: replace deprecated Go calls
+- #2017 - @dnwe - chore: delete legacy vagrant script
+- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test
+- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5
+- #2033 - @bai - Update dependencies
+- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method
+- #2035 - @dnwe - chore: populate the missing kafka versions
+- #2038 - @dnwe - feat: add a fuzzing workflow to github actions
+
+## New Contributors
+* @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983
+* @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990
+* @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988
+* @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001
+* @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003
+* @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973
+* @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992
+* @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006
+* @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718
+* @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0
+
+## Version 1.29.1 (2021-06-24)
+
+# New Features / Improvements
+
+- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API
+- #1964 - @ajanikow - Add DelegationToken ResourceType
+
+# Fixes
+
+- #1962 - @hanxiaolin - fix(consumer):  call interceptors when MaxProcessingTime expire
+- #1971 - @KerryJava - fix  kafka-producer-performance throughput panic
+- #1968 - @dnwe - chore: bump golang.org/x versions
+- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers
+- #1963 - @dnwe - fix: ensure backoff timer is re-used
+- #1949 - @dnwe - fix: explicitly use uint64 for payload length
+
+## Version 1.29.0 (2021-05-07)
+
+### New Features / Improvements
+
+- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API
+- #1869 - @wyndhblb - zstd: encode+decode performance improvements
+- #1541 - @izolight - add String, (Un)MarshalText for acl types.
+- #1921 - @bai - Add support for Kafka 2.8.0
+
+### Fixes
+- #1936 - @dnwe - fix(consumer): follow preferred broker
+- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response)
+- #1926 - @dnwe - fix: correct initial CodeQL findings
+- #1925 - @bai - Test out CodeQL
+- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos
+- #1922 - @bai - Update go dependencies
+- #1898 - @mmaslankaprv - Parsing only known control batches value
+- #1887 - @withshubh - Fix: issues affecting code quality
+
+## Version 1.28.0 (2021-02-15)
+
+**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
+
+- #1870 - @kvch - Update Kerberos library to latest major
+- #1876 - @bai - Update docs, reference pkg.go.dev
+- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
+- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
+- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
+- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
+- #1862 - @bai - Fix CI setenv permissions issues
+- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
+- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
+
+## Version 1.27.2 (2020-10-21)
+
+### Improvements
+
+#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
+
+## Fixes
+
+#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
+
+## Version 1.27.1 (2020-10-07)
+
+### Improvements
+
+#1775 - @d1egoaz - Adds a Producer Interceptor example
+#1781 - @justin-chen - Refresh brokers given list of seed brokers
+#1784 - @justin-chen - Add randomize seed broker method
+#1790 - @d1egoaz - remove example binary
+#1798 - @bai - Test against Go 1.15
+#1785 - @justin-chen - Add private method to Client interface to prevent implementation
+#1802 - @uvw - Support Go 1.13 error unwrapping
+
+## Fixes
+
+#1791 - @stanislavkozlovski - bump default version to 1.0.0
+
+## Version 1.27.0 (2020-08-11)
+
+### Improvements
+
+#1466 - @rubenvp8510  - Expose kerberos fast negotiation configuration
+#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
+#1699 - @wclaeys  - Consumer group support for manually committing offsets
+#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+#1726 - @d1egoaz - Include zstd on the functional tests
+#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+#1738 - @varun06 - fixed variable names that are named same as some std lib package names
+#1741 - @varun06 - updated zstd dependency to latest v1.10.10
+#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+#1763 - @alrs - remove deprecated tls options from test
+#1769 - @bai - Add support for Kafka 2.6.0
+
+## Fixes
+
+#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+#1744 - @alrs  - Fix isBalanced Function Signature
+
+## Version 1.26.4 (2020-05-19)
+
+### Fixes
+
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+## Version 1.26.3 (2020-05-07)
+
+### Fixes
+
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
+
+## Version 1.26.2 (2020-05-06)
+
+### ⚠️ Known Issues
+
+This release has been marked as not ready for production and may be unstable; please use v1.26.4 instead.
+
+### Improvements
+
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+### Fixes
+
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
+
+## Version 1.26.1 (2020-02-04)
+
+Improvements:
+- Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539))
+- Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595))
+- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573))
+- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592))
+
+Bug Fixes:
+- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590))
+- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589))
+
+## Version 1.26.0 (2020-01-24)
+
+New Features:
+- Enable zstd compression
+  ([1574](https://github.com/IBM/sarama/pull/1574),
+  [1582](https://github.com/IBM/sarama/pull/1582))
+- Support headers in tools kafka-console-producer
+  ([1549](https://github.com/IBM/sarama/pull/1549))
+
+Improvements:
+- Add SASL AuthIdentity to SASL frames (authzid)
+  ([1585](https://github.com/IBM/sarama/pull/1585)).
+
+Bug Fixes:
+- Sending messages with ZStd compression enabled fails in multiple ways
+  ([1252](https://github.com/IBM/sarama/issues/1252)).
+- Use the broker for any admin on BrokerConfig
+  ([1571](https://github.com/IBM/sarama/pull/1571)).
+- Set DescribeConfigRequest Version field
+  ([1576](https://github.com/IBM/sarama/pull/1576)).
+- ConsumerGroup flooding logs with client/metadata update req
+  ([1578](https://github.com/IBM/sarama/pull/1578)).
+- MetadataRequest version in DescribeCluster
+  ([1580](https://github.com/IBM/sarama/pull/1580)).
+- Fix deadlock in consumer group handleError
+  ([1581](https://github.com/IBM/sarama/pull/1581))
+- Fill in the Fetch{Request,Response} protocol
+  ([1582](https://github.com/IBM/sarama/pull/1582)).
+- Retry topic request on ControllerNotAvailable
+  ([1586](https://github.com/IBM/sarama/pull/1586)).
+
+## Version 1.25.0 (2020-01-13)
+
+New Features:
+- Support TLS protocol in kafka-producer-performance
+  ([1538](https://github.com/IBM/sarama/pull/1538)).
+- Add support for kafka 2.4.0
+  ([1552](https://github.com/IBM/sarama/pull/1552)).
+
+Improvements:
+- Allow the Consumer to disable auto-commit offsets (see the sketch after this
+  list) ([1164](https://github.com/IBM/sarama/pull/1164)).
+- Produce records with consistent timestamps
+  ([1455](https://github.com/IBM/sarama/pull/1455)).
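+
+A sketch of the auto-commit knob introduced by [1164](https://github.com/IBM/sarama/pull/1164) (field path as of this release; imports assumed), for callers that prefer to commit offsets explicitly:
+
+```go
+config := sarama.NewConfig()
+// Disable the periodic auto-commit; offsets are then only persisted when the
+// application commits them explicitly (e.g. via the offset manager).
+config.Consumer.Offsets.AutoCommit.Enable = false
+```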
+
+Bug Fixes:
+- Fix incorrect SetTopicMetadata name mentions
+  ([1534](https://github.com/IBM/sarama/pull/1534)).
+- Fix client.tryRefreshMetadata Println
+  ([1535](https://github.com/IBM/sarama/pull/1535)).
+- Fix panic on calling updateMetadata on closed client
+  ([1531](https://github.com/IBM/sarama/pull/1531)).
+- Fix possible faulty metrics in TestFuncProducing
+  ([1545](https://github.com/IBM/sarama/pull/1545)).
+
+## Version 1.24.1 (2019-10-31)
+
+New Features:
+- Add DescribeLogDirs Request/Response pair
+  ([1520](https://github.com/IBM/sarama/pull/1520)).
+
+Bug Fixes:
+- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
+  ([1518](https://github.com/IBM/sarama/pull/1518)).
+- Fix issue with consumergroup not rebalancing when new partition is added
+  ([1525](https://github.com/IBM/sarama/pull/1525)).
+- Ensure consistent use of read/write deadlines
+  ([1529](https://github.com/IBM/sarama/pull/1529)).
+
+## Version 1.24.0 (2019-10-09)
+
+New Features:
+- Add sticky partition assignor
+  ([1416](https://github.com/IBM/sarama/pull/1416)).
+- Switch from cgo zstd package to pure Go implementation
+  ([1477](https://github.com/IBM/sarama/pull/1477)).
+
+Improvements:
+- Allow creating ClusterAdmin from client
+  ([1415](https://github.com/IBM/sarama/pull/1415)).
+- Set KafkaVersion in ListAcls method
+  ([1452](https://github.com/IBM/sarama/pull/1452)).
+- Set request version in CreateACL ClusterAdmin method
+  ([1458](https://github.com/IBM/sarama/pull/1458)).
+- Set request version in DeleteACL ClusterAdmin method
+  ([1461](https://github.com/IBM/sarama/pull/1461)).
+- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
+  ([1464](https://github.com/IBM/sarama/pull/1464)).
+- Remove direct usage of gofork
+  ([1465](https://github.com/IBM/sarama/pull/1465)).
+- Add support for Go 1.13
+  ([1478](https://github.com/IBM/sarama/pull/1478)).
+- Improve behavior of NewMockListAclsResponse
+  ([1481](https://github.com/IBM/sarama/pull/1481)).
+
+Bug Fixes:
+- Fix race condition in consumergroup example
+  ([1434](https://github.com/IBM/sarama/pull/1434)).
+- Fix brokerProducer goroutine leak
+  ([1442](https://github.com/IBM/sarama/pull/1442)).
+- Use released version of lz4 library
+  ([1469](https://github.com/IBM/sarama/pull/1469)).
+- Set correct version in MockDeleteTopicsResponse
+  ([1484](https://github.com/IBM/sarama/pull/1484)).
+- Fix CLI help message typo
+  ([1494](https://github.com/IBM/sarama/pull/1494)).
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+  See https://github.com/IBM/sarama/issues/1252
+
+## Version 1.23.1 (2019-07-22)
+
+Bug Fixes:
+- Fix fetch deleted-record bug
+  ([1425](https://github.com/IBM/sarama/pull/1425)).
+- Handle SASL/OAUTHBEARER token rejection
+  ([1428](https://github.com/IBM/sarama/pull/1428)).
+
+## Version 1.23.0 (2019-07-02)
+
+New Features:
+- Add support for Kafka 2.3.0
+  ([1418](https://github.com/IBM/sarama/pull/1418)).
+- Add support for ListConsumerGroupOffsets v2
+  ([1374](https://github.com/IBM/sarama/pull/1374)).
+- Add support for DeleteConsumerGroup
+  ([1417](https://github.com/IBM/sarama/pull/1417)).
+- Add support for SASLVersion configuration
+  ([1410](https://github.com/IBM/sarama/pull/1410)).
+- Add kerberos support
+  ([1366](https://github.com/IBM/sarama/pull/1366)).
+
+Improvements:
+- Improve sasl_scram_client example
+  ([1406](https://github.com/IBM/sarama/pull/1406)).
+- Fix shutdown and race-condition in consumer-group example
+  ([1404](https://github.com/IBM/sarama/pull/1404)).
+- Add support for error codes 77-81
+  ([1397](https://github.com/IBM/sarama/pull/1397)).
+- Pool internal objects allocated per message
+  ([1385](https://github.com/IBM/sarama/pull/1385)).
+- Reduce packet decoder allocations
+  ([1373](https://github.com/IBM/sarama/pull/1373)).
+- Support timeout when fetching metadata
+  ([1359](https://github.com/IBM/sarama/pull/1359)).
+
+Bug Fixes:
+- Fix fetch size integer overflow
+  ([1376](https://github.com/IBM/sarama/pull/1376)).
+- Handle and log throttled FetchResponses
+  ([1383](https://github.com/IBM/sarama/pull/1383)).
+- Refactor misspelled word Resouce to Resource
+  ([1368](https://github.com/IBM/sarama/pull/1368)).
+
+## Version 1.22.1 (2019-04-29)
+
+Improvements:
+- Use zstd 1.3.8
+  ([1350](https://github.com/IBM/sarama/pull/1350)).
+- Add support for SaslHandshakeRequest v1
+  ([1354](https://github.com/IBM/sarama/pull/1354)).
+
+Bug Fixes:
+- Fix V5 MetadataRequest nullable topics array
+  ([1353](https://github.com/IBM/sarama/pull/1353)).
+- Use a different SCRAM client for each broker connection
+  ([1349](https://github.com/IBM/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+  ([1344](https://github.com/IBM/sarama/pull/1344)).
+
+## Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+  ([1318](https://github.com/IBM/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+  ([1326](https://github.com/IBM/sarama/pull/1326)).
+- Implement ReadCommitted
+  ([1307](https://github.com/IBM/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+  ([1331](https://github.com/IBM/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+  ([1295](https://github.com/IBM/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+  ([1232](https://github.com/IBM/sarama/pull/1232)).
+- Add SCRAM authentication example
+  ([1303](https://github.com/IBM/sarama/pull/1303)).
+- Add consumergroup examples
+  ([1304](https://github.com/IBM/sarama/pull/1304)).
+- Expose consumer batch size metric
+  ([1296](https://github.com/IBM/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+  ([1300](https://github.com/IBM/sarama/pull/1300)).
+- Reduce client close bookkeeping
+  ([1297](https://github.com/IBM/sarama/pull/1297)).
+- Satisfy error interface in create responses
+  ([1154](https://github.com/IBM/sarama/pull/1154)).
+- Please lint gods
+  ([1346](https://github.com/IBM/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+  ([1338](https://github.com/IBM/sarama/pull/1338)).
+- Update lz4 to latest version
+  ([1347](https://github.com/IBM/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+  ([1231](https://github.com/IBM/sarama/pull/1231)).
+- Fix cleanup error handler
+  ([1332](https://github.com/IBM/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+  ([1156](https://github.com/IBM/sarama/pull/1156)).
+
+## Version 1.21.0 (2019-02-24)
+
+New Features:
+- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
+  ([1236](https://github.com/IBM/sarama/pull/1236)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
+  ([1178](https://github.com/IBM/sarama/pull/1178)).
+- Implement SASL/OAUTHBEARER
+  ([1240](https://github.com/IBM/sarama/pull/1240)).
+
+Improvements:
+- Add Go mod support
+  ([1282](https://github.com/IBM/sarama/pull/1282)).
+- Add error codes 73-76
+  ([1239](https://github.com/IBM/sarama/pull/1239)).
+- Add retry backoff function
+  ([1160](https://github.com/IBM/sarama/pull/1160)).
+- Maintain metadata in the producer even when retries are disabled
+  ([1189](https://github.com/IBM/sarama/pull/1189)).
+- Include ReplicaAssignment in ListTopics
+  ([1274](https://github.com/IBM/sarama/pull/1274)).
+- Add producer performance tool
+  ([1222](https://github.com/IBM/sarama/pull/1222)).
+- Add support for LogAppend timestamps
+  ([1258](https://github.com/IBM/sarama/pull/1258)).
+
+Bug Fixes:
+- Fix potential deadlock when a heartbeat request fails
+  ([1286](https://github.com/IBM/sarama/pull/1286)).
+- Fix consuming compacted topic
+  ([1227](https://github.com/IBM/sarama/pull/1227)).
+- Set correct Kafka version for DescribeConfigsRequest v1
+  ([1277](https://github.com/IBM/sarama/pull/1277)).
+- Update kafka test version
+  ([1273](https://github.com/IBM/sarama/pull/1273)).
+
+## Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+  ([1100](https://github.com/IBM/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+  ([1230](https://github.com/IBM/sarama/pull/1230)).
+- Reuse compression objects
+  ([1185](https://github.com/IBM/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+  ([1243](https://github.com/IBM/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+  ([1242](https://github.com/IBM/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+  ([1244](https://github.com/IBM/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+  ([1203](https://github.com/IBM/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+  ([1141](https://github.com/IBM/sarama/pull/1141)).
+
+## Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+   ([#1170](https://github.com/IBM/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+   ([#1152](https://github.com/IBM/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/IBM/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/IBM/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/IBM/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+   ([#1173](https://github.com/IBM/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+   ([#1202](https://github.com/IBM/sarama/pull/1202)).
+ - Add error codes 61 to 72
+   ([#1195](https://github.com/IBM/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+   ([#1182](https://github.com/IBM/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+   ([#1209](https://github.com/IBM/sarama/pull/1209)).
+ - Fix typos in code and comments
+   ([#1228](https://github.com/IBM/sarama/pull/1228)).
+
+## Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+   ([#1099](https://github.com/IBM/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+   ([#1176](https://github.com/IBM/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+
+## Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+   ([#1112](https://github.com/IBM/sarama/pull/1112)).
+ - Add customizable partitioner
+   ([#1118](https://github.com/IBM/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+   ([#1055](https://github.com/IBM/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+   ([#1149](https://github.com/IBM/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+   ([#1123](https://github.com/IBM/sarama/pull/1123)).
+ - Simpler offset management
+   ([#1127](https://github.com/IBM/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+   ([#1110](https://github.com/IBM/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+   expected topic/partition blocks
+   ([#1086](https://github.com/IBM/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+   ([#1115](https://github.com/IBM/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+   ([#1142](https://github.com/IBM/sarama/pull/1142)).
+ - Add version check when producing message with headers
+   ([#1117](https://github.com/IBM/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+   ([#1132](https://github.com/IBM/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+   ([#1125](https://github.com/IBM/sarama/pull/1125)).
+
+## Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+   ([#1044](https://github.com/IBM/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+   ([#1047](https://github.com/IBM/sarama/pull/1047),
+    [#1069](https://github.com/IBM/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+   ([#1098](https://github.com/IBM/sarama/pull/1098))
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+   ([#1065](https://github.com/IBM/sarama/pull/1065),
+    [#1096](https://github.com/IBM/sarama/pull/1096),
+    [#1027](https://github.com/IBM/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+   ([#1063](https://github.com/IBM/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+   ([#1010](https://github.com/IBM/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+   ([#1049](https://github.com/IBM/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+   ([#1052](https://github.com/IBM/sarama/pull/1052)).
+ - Add support for Go 1.10
+   ([#1064](https://github.com/IBM/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+   ([#1073](https://github.com/IBM/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+   ([#1050](https://github.com/IBM/sarama/pull/1050),
+    [#1051](https://github.com/IBM/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+   ([#1033](https://github.com/IBM/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+   ([#1092](https://github.com/IBM/sarama/pull/1092)).
+
+## Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+   ([#1007](https://github.com/IBM/sarama/pull/1007),
+    [#1008](https://github.com/IBM/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+   ([#1009](https://github.com/IBM/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+   ([#1016](https://github.com/IBM/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+   ([#999](https://github.com/IBM/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+   ([#1019](https://github.com/IBM/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+   which results in much higher throughput in most cases
+   ([#1024](https://github.com/IBM/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+   reduce CPU and memory usage when processing many partitions
+   ([#1028](https://github.com/IBM/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a
+   recompression pass
+   ([#1002](https://github.com/IBM/sarama/pull/1002),
+    [#1015](https://github.com/IBM/sarama/pull/1015)).
+
+Bug Fixes:
+ - Fix producing uncompressed batches with the new protocol format
+   ([#1032](https://github.com/IBM/sarama/issues/1032)).
+ - Fix consuming compacted topics with the new protocol format
+   ([#1005](https://github.com/IBM/sarama/issues/1005)).
+ - Fix consuming topics with a mix of protocol formats
+   ([#1021](https://github.com/IBM/sarama/issues/1021)).
+ - Fix consuming when the broker includes multiple batches in a single response
+   ([#1022](https://github.com/IBM/sarama/issues/1022)).
+ - Fix detection of `PartialTrailingMessage` when the partial message was
+   truncated before the magic value indicating its version
+   ([#1030](https://github.com/IBM/sarama/pull/1030)).
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
+   ([#1035](https://github.com/IBM/sarama/pull/1035)).
+
+## Version 1.15.0 (2017-12-08)
+
+New Features:
+ - Claim official support for Kafka 1.0, though it did already work
+   ([#984](https://github.com/IBM/sarama/pull/984)).
+ - Helper methods for Kafka version numbers to/from strings
+   ([#989](https://github.com/IBM/sarama/pull/989)).
+ - Implement CreatePartitions request/response
+   ([#985](https://github.com/IBM/sarama/pull/985)).
+
+Improvements:
+ - Add error codes 45-60
+   ([#986](https://github.com/IBM/sarama/issues/986)).
+
+Bug Fixes:
+ - Fix slow consuming for certain Kafka 0.11/1.0 configurations
+   ([#982](https://github.com/IBM/sarama/pull/982)).
+ - Correctly determine when a FetchResponse contains the new message format
+   ([#990](https://github.com/IBM/sarama/pull/990)).
+ - Fix producing with multiple headers
+   ([#996](https://github.com/IBM/sarama/pull/996)).
+ - Fix handling of truncated record batches
+   ([#998](https://github.com/IBM/sarama/pull/998)).
+ - Fix leaking metrics when closing brokers
+   ([#991](https://github.com/IBM/sarama/pull/991)).
+
+## Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+   protocol and the necessary behavioural changes in the producer and consumer.
+   Transactions and idempotency are not yet supported, but producing and
+   consuming should work with all the existing bells and whistles (batching,
+   compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+   of Arista Networks for this work. Part of
+   ([#901](https://github.com/IBM/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+   ([#970](https://github.com/IBM/sarama/pull/970)).
+ - Return partial replicas list when we have it
+   ([#975](https://github.com/IBM/sarama/pull/975)).
+
+## Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+   ([#905](https://github.com/IBM/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+   ([#939](https://github.com/IBM/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+   extremely large clusters
+   ([#937](https://github.com/IBM/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+   ([#932](https://github.com/IBM/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+   ([#885](https://github.com/IBM/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+   by the broker, which can be meaningful
+   ([#930](https://github.com/IBM/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+   variance in the actual timeout
+   ([#933](https://github.com/IBM/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+   ([#907](https://github.com/IBM/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+   ([#940](https://github.com/IBM/sarama/pull/940)).
+
+## Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+   version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note
+   that you still need to specify the Kafka version in the Sarama configuration
+   for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+   active brokers ([#813](https://github.com/IBM/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+   in-sync broker IDs for the given partition, now that the Kafka versions for
+   which this was misleading are no longer in our supported set
+   ([#872](https://github.com/IBM/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+   partitioner with a custom hash method in case the default (FNV-1a) is not
+   suitable
+   ([#837](https://github.com/IBM/sarama/pull/837),
+    [#841](https://github.com/IBM/sarama/pull/841)).
+
+Improvements:
+ - Recognize more Kafka error codes
+   ([#859](https://github.com/IBM/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+   correct error ([#818](https://github.com/IBM/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+   transparent if you're using the `AddGroupProtocol` or
+   `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+   the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+   ([#812](https://github.com/IBM/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+   ([#859](https://github.com/IBM/sarama/pull/859)).
+
+## Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
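+
+A minimal sketch of a SyncProducer honouring this requirement (broker address
+and topic are placeholders):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	// Required for the SyncProducer as of Sarama 1.11; NewSyncProducer
+	// returns a configuration error if this is left false.
+	config.Producer.Return.Successes = true
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer producer.Close()
+
+	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+		Topic: "example-topic",
+		Value: sarama.StringEncoder("hello"),
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Printf("delivered to partition %d at offset %d", partition, offset)
+}
+```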
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+   ([#701](https://github.com/IBM/sarama/pull/701),
+    [#746](https://github.com/IBM/sarama/pull/746),
+    [#766](https://github.com/IBM/sarama/pull/766)).
+ - Add support for LZ4 compression
+   ([#786](https://github.com/IBM/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+   ([#775](https://github.com/IBM/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+   `HighWaterMarkOffset` values of its child topic/partitions
+   ([#769](https://github.com/IBM/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+   ([#759](https://github.com/IBM/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+   ([#756](https://github.com/IBM/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+   ([#787](https://github.com/IBM/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+   ([#790](https://github.com/IBM/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+   ([#795](https://github.com/IBM/sarama/pull/795)).
+
+## Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+   ([#717](https://github.com/IBM/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+   ([#722](https://github.com/IBM/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+   in v1.10.0. Go's timers are finicky
+   ([#730](https://github.com/IBM/sarama/pull/730),
+    [#733](https://github.com/IBM/sarama/pull/733),
+    [#734](https://github.com/IBM/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+   ([#735](https://github.com/IBM/sarama/pull/735)).
+
+## Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
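+
+For illustration, a sketch of telling Sarama the broker version (broker address
+is a placeholder; the usual `log` and `github.com/IBM/sarama` imports are assumed):
+
+```go
+config := sarama.NewConfig()
+// Default is 0.8.2; features that need a newer broker fail with an error
+// unless this is raised to match your cluster.
+config.Version = sarama.V0_10_0_0
+
+client, err := sarama.NewClient([]string{"localhost:9092"}, config)
+if err != nil {
+	log.Fatalln(err)
+}
+defer client.Close()
+```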
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and
+[#713](https://github.com/IBM/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
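+
+A sketch of that convention when using the offset manager directly (group,
+topic and partition are placeholders; `client` is an existing `sarama.Client`
+and `msg` is the last message fully processed):
+
+```go
+om, err := sarama.NewOffsetManagerFromClient("example-group", client)
+if err != nil {
+	log.Fatalln(err)
+}
+pom, err := om.ManagePartition("example-topic", 0)
+if err != nil {
+	log.Fatalln(err)
+}
+// Commit the offset of the *next* message to consume, i.e. one greater than
+// the offset of the message we just finished processing.
+pom.MarkOffset(msg.Offset+1, "")
+```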
+
+New Features:
+ - Support for Kafka 0.10
+   ([#672](https://github.com/IBM/sarama/pull/672),
+    [#678](https://github.com/IBM/sarama/pull/678),
+    [#681](https://github.com/IBM/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+   ([#676](https://github.com/IBM/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+   ([#677](https://github.com/IBM/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+   ([#667](https://github.com/IBM/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+   ([#634](https://github.com/IBM/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+   misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+   ([#707](https://github.com/IBM/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+   ([#664](https://github.com/IBM/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+   ([#685](https://github.com/IBM/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+   ([#693](https://github.com/IBM/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+   ([#705](https://github.com/IBM/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+   ([#706](https://github.com/IBM/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+   ([#709](https://github.com/IBM/sarama/pull/709)).
+
+## Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+   ([#602](https://github.com/IBM/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+   implementations ([#570](https://github.com/IBM/sarama/pull/570)).
+ - Declare support for Golang 1.6
+   ([#611](https://github.com/IBM/sarama/pull/611)).
+ - Support for SASL plain-text auth
+   ([#648](https://github.com/IBM/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+   ([#604](https://github.com/IBM/sarama/pull/604)).
+ - Documentation cleanup
+   ([#605](https://github.com/IBM/sarama/pull/605),
+    [#621](https://github.com/IBM/sarama/pull/621),
+    [#654](https://github.com/IBM/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+   ([#658](https://github.com/IBM/sarama/pull/658)).
+
+## Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+   - All protocol messages and fields
+   ([#586](https://github.com/IBM/sarama/pull/586),
+   [#588](https://github.com/IBM/sarama/pull/588),
+   [#590](https://github.com/IBM/sarama/pull/590)).
+   - Verified that TLS support works
+   ([#581](https://github.com/IBM/sarama/pull/581)).
+   - Fixed the OffsetManager compatibility
+   ([#585](https://github.com/IBM/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+   ([#584](https://github.com/IBM/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+   ([#589](https://github.com/IBM/sarama/pull/589)).
+
+## Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+   ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several
+   caveats:
+   - Protocol-layer support is mostly in place
+     ([#577](https://github.com/IBM/sarama/pull/577)); however, Kafka 0.9
+     renamed some messages and fields, which we did not rename in order to
+     preserve API compatibility.
+   - The producer and consumer work against 0.9, but the offset manager does
+     not ([#573](https://github.com/IBM/sarama/pull/573)).
+   - TLS support may or may not work
+     ([#581](https://github.com/IBM/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+   when the TCP connection is left hanging
+   ([#548](https://github.com/IBM/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+   solution to [#449](https://github.com/IBM/sarama/pull/449). It is also
+   slightly more efficient, and much more precise in calculating batch sizes
+   when compression is used
+   ([#549](https://github.com/IBM/sarama/pull/549),
+   [#550](https://github.com/IBM/sarama/pull/550),
+   [#551](https://github.com/IBM/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+   ([#553](https://github.com/IBM/sarama/pull/553)).
+
+## Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+   ([#449](https://github.com/IBM/sarama/pull/449)).
+
+## Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+   Kafka 0.8.2. The API is designed mainly for integration into a future
+   high-level consumer, not for direct use, although it is *possible* to use it
+   directly.
+   ([#461](https://github.com/IBM/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+   removing a major hotspot from most profiles
+   ([#255](https://github.com/IBM/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+   by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523),
+   [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways
+   ([#528](https://github.com/IBM/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+   ([#529](https://github.com/IBM/sarama/pull/529)).
+
+## Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+   to change when Kafka releases built-in TLS support, but for now this is
+   enough to work with TLS-terminating proxies
+   ([#154](https://github.com/IBM/sarama/pull/154)).
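+
+   A sketch of enabling it (assuming `crypto/tls` and sarama imports;
+   certificate setup is omitted and depends on your proxy or broker):
+
+   ```go
+   config := sarama.NewConfig()
+   config.Net.TLS.Enable = true
+   config.Net.TLS.Config = &tls.Config{
+   	// Fill in RootCAs / Certificates as appropriate for your environment.
+   	MinVersion: tls.VersionTLS12,
+   }
+   ```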
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+   all other partitions will continue to consume normally
+   ([#485](https://github.com/IBM/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+   ([#495](https://github.com/IBM/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+   future work ([#300](https://github.com/IBM/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+   ([#475](https://github.com/IBM/sarama/pull/475)).
+
+## Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+   circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+   the producer ([#468](https://github.com/IBM/sarama/pull/468)).
+ - Update the import path for snappy-go; it has moved again and the API has
+   changed slightly ([#486](https://github.com/IBM/sarama/pull/486)).
+
+## Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go; it has moved from google code to github
+   ([#456](https://github.com/IBM/sarama/pull/456)).
+
+## Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+   ([#446](https://github.com/IBM/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+   ([#450](https://github.com/IBM/sarama/pull/450),
+   [#451](https://github.com/IBM/sarama/pull/451)).
+
+## Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+   users to dynamically choose what topics/partitions to consume without
+   instantiating a full client
+   ([#431](https://github.com/IBM/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+   by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+   partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+   ([#439](https://github.com/IBM/sarama/pull/439),
+   [#442](https://github.com/IBM/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+   useful, and slightly less verbose
+   ([#429](https://github.com/IBM/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+   thundering herd on the first broker in the list
+   ([#441](https://github.com/IBM/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+   shutting down, fixing several instances of confusing behaviour and at least
+   one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+   making it much more resilient to specific user code ordering
+   ([#325](https://github.com/IBM/sarama/pull/325)).
+
+## Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+   ConsumerMetadataRequests similar to how it tracks partition leadership using
+   regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)).
+   This adds two methods to the client API:
+   - `Coordinator(consumerGroup string) (*Broker, error)`
+   - `RefreshCoordinator(consumerGroup string) error`
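+
+   A sketch of the new lookup (group name is a placeholder; `client` is an
+   existing `sarama.Client` and the usual `log` import is assumed):
+
+   ```go
+   broker, err := client.Coordinator("example-group")
+   if err != nil {
+   	log.Fatalln(err)
+   }
+   log.Printf("coordinator for example-group is %s", broker.Addr())
+   ```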
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+   ID/address/port combination for the Coordinator; accessing the fields
+   individually has been deprecated
+   ([#413](https://github.com/IBM/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+   Consumers will fail to start if the provided offset is out of range
+   ([#418](https://github.com/IBM/sarama/pull/418))
+   and they will automatically shut down if the offset falls out of range
+   ([#424](https://github.com/IBM/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+   ([#427](https://github.com/IBM/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+   it happens to be activated while the client is being closed
+   ([#422](https://github.com/IBM/sarama/pull/422)).
+
+## Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+   ([#389](https://github.com/IBM/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+   messages due to an improved queue implementation
+   ([#396](https://github.com/IBM/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+   changes ([#385](https://github.com/IBM/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+   retry once in the event of stale information or similar
+   ([#394](https://github.com/IBM/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+   ([#407](https://github.com/IBM/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+   API versions ([#390](https://github.com/IBM/sarama/pull/390),
+   [#400](https://github.com/IBM/sarama/pull/400)).
+
+## Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+   broken topics don't choke throughput
+   ([#373](https://github.com/IBM/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+   ([#367](https://github.com/IBM/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+   ([#369](https://github.com/IBM/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+   gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/IBM/sarama/pull/372)).
+
+
+## Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package (a usage sketch follows this list).
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
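+
+For the mocks mentioned above, a sketch of injecting a mock `SyncProducer` in a test (imports of `testing`, `github.com/IBM/sarama` and `github.com/IBM/sarama/mocks` are assumed; names and topic are placeholders):
+
+```go
+func TestPublish(t *testing.T) {
+	mp := mocks.NewSyncProducer(t, nil)
+	mp.ExpectSendMessageAndSucceed()
+
+	// Anywhere the code under test expects a sarama.SyncProducer, hand it mp.
+	_, _, err := mp.SendMessage(&sarama.ProducerMessage{
+		Topic: "example-topic",
+		Value: sarama.StringEncoder("hello"),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+```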
diff --git a/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..8470ec5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+  and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+  overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+  advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+  address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+dominic.evans@uk.ibm.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior,  harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/vendor/github.com/IBM/sarama/CONTRIBUTING.md b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
new file mode 100644
index 0000000..bb88127
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+[fork]: https://github.com/IBM/sarama/fork
+[pr]: https://github.com/IBM/sarama/compare
+[released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license
+
+Hi there! We are thrilled that you would like to contribute to Sarama.
+Contributions are always welcome, both reporting issues and submitting pull requests!
+
+## Reporting issues
+
+Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
+
+- What SHA of Sarama are you running? If this is not the latest SHA on the main branch, please check whether the problem persists with the latest version.
+- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output (see the sketch after this list). Please include it in your issue description.
+- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
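+
+For the `sarama.Logger` bullet above, a minimal way to turn on debug output (the writer and prefix are only suggestions):
+
+```go
+import (
+	"log"
+	"os"
+
+	"github.com/IBM/sarama"
+)
+
+func init() {
+	sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
+}
+```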
+
+Also, please include the following information about your environment, so we can help you faster:
+
+- What version of Kafka are you using?
+- What version of Go are you using?
+- What are the values of your Producer/Consumer/Client configuration?
+
+
+## Contributing a change
+
+Contributions to this project are [released][released] to the public under the project's [opensource license](LICENSE.md).
+By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO).
+The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes.
+
+Contributors must _sign-off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author:
+
+```
+feat: this is my commit message
+
+Signed-off-by: Random J Developer <random@developer.example.org>
+```
+
+Git even has a `-s` command line option to append this automatically to your
+commit message:
+
+```
+$ git commit -s -m 'This is my commit message'
+```
+
+Because this library is in production use by many people and applications, we code review all additions.
+To make the review process go as smoothly as possible, please consider the following.
+
+- If you plan to work on something major, please open an issue to discuss the design first.
+- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
+- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
+- Run [go vet](https://golang.org/cmd/vet/) to detect any suspicious constructs in your code that could be bugs.
+- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
+- You may also want to run [golint](https://github.com/golang/lint) to detect style problems.
+- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
+- Make sure your code is supported by all the Go versions we support.
+  You can rely on GitHub Actions for testing older Go versions.
+
+## Submitting a pull request
+
+0. [Fork][fork] and clone the repository
+1. Create a new branch: `git checkout -b my-branch-name`
+2. Make your change, push to your fork and [submit a pull request][pr]
+3. Wait for your pull request to be reviewed and merged.
+
+Here are a few things you can do that will increase the likelihood of your pull request being accepted:
+
+- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
+- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
+
+## Further Reading
+
+- [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/)
+- [The most powerful contributor agreement](https://lwn.net/Articles/592503/)
+- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
+- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
+- [GitHub Help](https://help.github.com)
diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka
new file mode 100644
index 0000000..bb18b6c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka
@@ -0,0 +1,55 @@
+FROM registry.access.redhat.com/ubi9/ubi-minimal:9.6@sha256:7c5495d5fad59aaee12abc3cbbd2b283818ee1e814b00dbc7f25bf2d14fa4f0c
+
+USER root
+
+RUN microdnf update -y \
+ && microdnf install -y git gzip java-17-openjdk-headless tar tzdata-java \
+ && microdnf reinstall -y tzdata \
+ && microdnf clean all
+
+ENV JAVA_HOME=/usr/lib/jvm/jre-17
+
+# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html
+# Ensure Java doesn't cache any dns results
+RUN cd /etc/java/java-17-openjdk/*/conf/security \
+ && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \
+ && echo 'networkaddress.cache.ttl=0' >> java.security \
+ && echo 'networkaddress.cache.negative.ttl=0' >> java.security
+
+ARG SCALA_VERSION="2.13"
+ARG KAFKA_VERSION="3.9.1"
+
+WORKDIR /tmp
+
+# https://github.com/apache/kafka/blob/2e2b0a58eda3e677763af974a44a6aaa3c280214/tests/docker/Dockerfile#L77-L105
+ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages"
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN --mount=type=bind,target=.,rw=true \
+    mkdir -p "/opt/kafka-${KAFKA_VERSION}" \
+ && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \
+ && curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}"
+
+# older kafka versions depend upon jaxb-api being bundled with the JDK, but it
+# was removed from Java 11 so work around that by including it in the kafka
+# libs dir regardless
+RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \
+ && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \
+ && rm -f jaxb-api-2.3.0.jar
+
+# older kafka versions with the zookeeper 3.4.13/3.4.14 client aren't compatible with Java 17 so quietly bump them to 3.5.9
+RUN if ! stat /opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.*.jar; then exit 0; fi ; \
+    rm -f /opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.*.jar \
+ && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper/3.5.9/zookeeper-3.5.9.jar" \
+ && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-jute-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper-jute/3.5.9/zookeeper-jute-3.5.9.jar"
+
+WORKDIR /opt/kafka-${KAFKA_VERSION}
+
+ENV JAVA_MAJOR_VERSION=17
+
+RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh
+
+COPY entrypoint.sh /
+
+USER 65534:65534
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/vendor/github.com/IBM/sarama/LICENSE.md b/vendor/github.com/IBM/sarama/LICENSE.md
new file mode 100644
index 0000000..f8f64d4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/LICENSE.md
@@ -0,0 +1,24 @@
+# MIT License
+
+Copyright (c) 2013 Shopify
+
+Copyright (c) 2023 IBM Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/IBM/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile
new file mode 100644
index 0000000..c41445c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/Makefile
@@ -0,0 +1,47 @@
+default: fmt get update test lint
+
+GO       := go
+GOBIN    := $(shell pwd)/bin
+GOBUILD  := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
+
+GOTESTSUM         := $(GOBIN)/gotestsum
+GOTESTSUM_VERSION := v1.13.0
+$(GOTESTSUM):
+	GOBIN=$(GOBIN) go install gotest.tools/gotestsum@$(GOTESTSUM_VERSION)
+
+TESTSTAT         := $(GOBIN)/teststat
+TESTSTAT_VERSION := v0.1.26
+$(TESTSTAT):
+	GOBIN=$(GOBIN) go install github.com/vearutop/teststat@$(TESTSTAT_VERSION)
+
+FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
+TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
+
+get:
+	$(GO) get ./...
+	$(GO) mod verify
+	$(GO) mod tidy
+
+update:
+	$(GO) get -u -v ./...
+	$(GO) mod verify
+	$(GO) mod tidy
+
+fmt:
+	gofmt -s -l -w $(FILES) $(TESTS)
+
+lint:
+	GOFLAGS="-tags=functional" golangci-lint run
+
+test: $(GOTESTSUM) $(TESTSTAT) $(TPARSE)
+	@$(GOTESTSUM) $(if ${CI},--format github-actions,--format testdox) --jsonfile _test/unittests.json --junitfile _test/unittests.xml \
+		--rerun-fails --packages="./..." \
+		-- -v -race -coverprofile=profile.out -covermode=atomic -timeout 2m
+	@$(TESTSTAT) _test/unittests.json
+
+.PHONY: test_functional
+test_functional: $(GOTESTSUM) $(TESTSTAT) $(TPARSE)
+	@$(GOTESTSUM) $(if ${CI},--format github-actions,--format testdox) --jsonfile _test/fvt.json --junitfile _test/fvt.xml \
+		--rerun-fails --packages="./..." \
+		-- -v -race -coverprofile=profile.out -covermode=atomic -timeout 15m -tags=functional
+	@$(TESTSTAT) _test/fvt.json
diff --git a/vendor/github.com/IBM/sarama/README.md b/vendor/github.com/IBM/sarama/README.md
new file mode 100644
index 0000000..4534d7b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/README.md
@@ -0,0 +1,35 @@
+# sarama
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/IBM/sarama.svg)](https://pkg.go.dev/github.com/IBM/sarama)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/IBM/sarama/badge?style=flat)](https://securityscorecards.dev/viewer/?uri=github.com/IBM/sarama)
+[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7996/badge)](https://www.bestpractices.dev/projects/7996)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/).
+
+## Getting started
+
+- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions).
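+
+If you just want to produce a message, here is a minimal sketch (the broker address `localhost:9092` and the `example` topic are placeholders, not defaults):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	// "localhost:9092" and the "example" topic below are placeholders for this sketch.
+	config := sarama.NewConfig()
+	config.Producer.Return.Successes = true // required by SyncProducer
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer producer.Close()
+
+	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+		Topic: "example",
+		Value: sarama.StringEncoder("hello from sarama"),
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Printf("message stored at partition %d, offset %d", partition, offset)
+}
+```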
+
+## Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. However, older releases of Kafka are still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the standard Go
+[module version numbering](https://go.dev/doc/modules/version-numbers) scheme.
+
+A changelog is available [here](CHANGELOG.md).
+
+## Contributing
+
+- Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/CONTRIBUTING.md).
+- Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details.
+- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
+- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+- If you have any questions, just ask!
diff --git a/vendor/github.com/IBM/sarama/SECURITY.md b/vendor/github.com/IBM/sarama/SECURITY.md
new file mode 100644
index 0000000..b2f6e61
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/SECURITY.md
@@ -0,0 +1,11 @@
+# Security
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+The easiest way to report a security issue is privately through GitHub [here](https://github.com/IBM/sarama/security/advisories/new).
+
+See [Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability) for full instructions.
+
+Alternatively, you can report them via e-mail or anonymous form to the IBM Product Security Incident Response Team (PSIRT) following the guidelines under the [IBM Security Vulnerability Management](https://www.ibm.com/support/pages/ibm-security-vulnerability-management) pages.
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/IBM/sarama/Vagrantfile
similarity index 100%
rename from vendor/github.com/Shopify/sarama/Vagrantfile
rename to vendor/github.com/IBM/sarama/Vagrantfile
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/IBM/sarama/acl_bindings.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/acl_bindings.go
rename to vendor/github.com/IBM/sarama/acl_bindings.go
diff --git a/vendor/github.com/IBM/sarama/acl_create_request.go b/vendor/github.com/IBM/sarama/acl_create_request.go
new file mode 100644
index 0000000..187cee0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_create_request.go
@@ -0,0 +1,97 @@
+package sarama
+
+// CreateAclsRequest is an acl creation request
+type CreateAclsRequest struct {
+	Version      int16
+	AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+		return err
+	}
+
+	for _, aclCreation := range c.AclCreations {
+		if err := aclCreation.encode(pe, c.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreations = make([]*AclCreation, n)
+
+	for i := 0; i < n; i++ {
+		c.AclCreations[i] = new(AclCreation)
+		if err := c.AclCreations[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) key() int16 {
+	return apiKeyCreateAcls
+}
+
+func (c *CreateAclsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateAclsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (c *CreateAclsRequest) isValidVersion() bool {
+	return c.Version >= 0 && c.Version <= 1
+}
+
+func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+// AclCreation is a wrapper around Resource and Acl type
+type AclCreation struct {
+	Resource
+	Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder, version int16) error {
+	if err := a.Resource.encode(pe, version); err != nil {
+		return err
+	}
+	if err := a.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+	if err := a.Resource.decode(pd, version); err != nil {
+		return err
+	}
+	if err := a.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_create_response.go b/vendor/github.com/IBM/sarama/acl_create_response.go
new file mode 100644
index 0000000..514fc64
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_create_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+import "time"
+
+// CreateAclsResponse is an acl creation response type
+type CreateAclsResponse struct {
+	Version              int16
+	ThrottleTime         time.Duration
+	AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(c.ThrottleTime)
+
+	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+		return err
+	}
+
+	for _, aclCreationResponse := range c.AclCreationResponses {
+		if err := aclCreationResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	c.ThrottleTime, err = pd.getDurationMs()
+	if err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreationResponses = make([]*AclCreationResponse, n)
+	for i := 0; i < n; i++ {
+		c.AclCreationResponses[i] = new(AclCreationResponse)
+		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) key() int16 {
+	return apiKeyCreateAcls
+}
+
+func (c *CreateAclsResponse) version() int16 {
+	return c.Version
+}
+
+func (c *CreateAclsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (c *CreateAclsResponse) isValidVersion() bool {
+	return c.Version >= 0 && c.Version <= 1
+}
+
+func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *CreateAclsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+// AclCreationResponse is an acl creation response type
+type AclCreationResponse struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (a *AclCreationResponse) encode(pe packetEncoder) error {
+	pe.putKError(a.Err)
+
+	if err := pe.putNullableString(a.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
+	a.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if a.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_delete_request.go b/vendor/github.com/IBM/sarama/acl_delete_request.go
new file mode 100644
index 0000000..ff40740
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_delete_request.go
@@ -0,0 +1,70 @@
+package sarama
+
+// DeleteAclsRequest is a delete acl request
+type DeleteAclsRequest struct {
+	Version int
+	Filters []*AclFilter
+}
+
+func (d *DeleteAclsRequest) setVersion(v int16) {
+	d.Version = int(v)
+}
+
+func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Filters)); err != nil {
+		return err
+	}
+
+	for _, filter := range d.Filters {
+		filter.Version = d.Version
+		if err := filter.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = int(version)
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.Filters = make([]*AclFilter, n)
+	for i := 0; i < n; i++ {
+		d.Filters[i] = new(AclFilter)
+		d.Filters[i].Version = int(version)
+		if err := d.Filters[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) key() int16 {
+	return apiKeyDeleteAcls
+}
+
+func (d *DeleteAclsRequest) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DeleteAclsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (d *DeleteAclsRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/acl_delete_response.go b/vendor/github.com/IBM/sarama/acl_delete_response.go
new file mode 100644
index 0000000..bc6637e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_delete_response.go
@@ -0,0 +1,176 @@
+package sarama
+
+import "time"
+
+// DeleteAclsResponse is a delete acl response
+type DeleteAclsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	FilterResponses []*FilterResponse
+}
+
+func (d *DeleteAclsResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(d.ThrottleTime)
+
+	if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
+		return err
+	}
+
+	for _, filterResponse := range d.FilterResponses {
+		if err := filterResponse.encode(pe, d.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.FilterResponses = make([]*FilterResponse, n)
+
+	for i := 0; i < n; i++ {
+		d.FilterResponses[i] = new(FilterResponse)
+		if err := d.FilterResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+	return apiKeyDeleteAcls
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteAclsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (d *DeleteAclsResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *DeleteAclsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+// FilterResponse is a filter response type
+type FilterResponse struct {
+	Err          KError
+	ErrMsg       *string
+	MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
+	pe.putKError(f.Err)
+	if err := pe.putNullableString(f.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+		return err
+	}
+	for _, matchingAcl := range f.MatchingAcls {
+		if err := matchingAcl.encode(pe, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+	f.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if f.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	f.MatchingAcls = make([]*MatchingAcl, n)
+	for i := 0; i < n; i++ {
+		f.MatchingAcls[i] = new(MatchingAcl)
+		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MatchingAcl is a matching acl type
+type MatchingAcl struct {
+	Err    KError
+	ErrMsg *string
+	Resource
+	Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
+	pe.putKError(m.Err)
+	if err := pe.putNullableString(m.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := m.Resource.encode(pe, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+	m.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if m.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if err := m.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_describe_request.go b/vendor/github.com/IBM/sarama/acl_describe_request.go
new file mode 100644
index 0000000..6eb218f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_describe_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+// DescribeAclsRequest is a describe acl request type
+type DescribeAclsRequest struct {
+	Version int
+	AclFilter
+}
+
+func (d *DescribeAclsRequest) setVersion(v int16) {
+	d.Version = int(v)
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+	d.AclFilter.Version = d.Version
+	return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = int(version)
+	d.AclFilter.Version = int(version)
+	return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+	return apiKeyDescribeAcls
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DescribeAclsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (d *DescribeAclsRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/acl_describe_response.go b/vendor/github.com/IBM/sarama/acl_describe_response.go
new file mode 100644
index 0000000..c9e2552
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_describe_response.go
@@ -0,0 +1,100 @@
+package sarama
+
+import "time"
+
+// DescribeAclsResponse is a describe acl response type
+type DescribeAclsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	ResourceAcls []*ResourceAcls
+}
+
+func (d *DescribeAclsResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(d.ThrottleTime)
+	pe.putKError(d.Err)
+
+	if err := pe.putNullableString(d.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
+		return err
+	}
+
+	for _, resourceAcl := range d.ResourceAcls {
+		if err := resourceAcl.encode(pe, d.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	d.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	errmsg, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	if errmsg != "" {
+		d.ErrMsg = &errmsg
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.ResourceAcls = make([]*ResourceAcls, n)
+
+	for i := 0; i < n; i++ {
+		d.ResourceAcls[i] = new(ResourceAcls)
+		if err := d.ResourceAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) key() int16 {
+	return apiKeyDescribeAcls
+}
+
+func (d *DescribeAclsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DescribeAclsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (d *DescribeAclsResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *DescribeAclsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/IBM/sarama/acl_filter.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/acl_filter.go
rename to vendor/github.com/IBM/sarama/acl_filter.go
diff --git a/vendor/github.com/IBM/sarama/acl_types.go b/vendor/github.com/IBM/sarama/acl_types.go
new file mode 100644
index 0000000..62bb534
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_types.go
@@ -0,0 +1,238 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+)
+
+type (
+	AclOperation int
+
+	AclPermissionType int
+
+	AclResourceType int
+
+	AclResourcePatternType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+	AclOperationUnknown AclOperation = iota
+	AclOperationAny
+	AclOperationAll
+	AclOperationRead
+	AclOperationWrite
+	AclOperationCreate
+	AclOperationDelete
+	AclOperationAlter
+	AclOperationDescribe
+	AclOperationClusterAction
+	AclOperationDescribeConfigs
+	AclOperationAlterConfigs
+	AclOperationIdempotentWrite
+)
+
+func (a *AclOperation) String() string {
+	mapping := map[AclOperation]string{
+		AclOperationUnknown:         "Unknown",
+		AclOperationAny:             "Any",
+		AclOperationAll:             "All",
+		AclOperationRead:            "Read",
+		AclOperationWrite:           "Write",
+		AclOperationCreate:          "Create",
+		AclOperationDelete:          "Delete",
+		AclOperationAlter:           "Alter",
+		AclOperationDescribe:        "Describe",
+		AclOperationClusterAction:   "ClusterAction",
+		AclOperationDescribeConfigs: "DescribeConfigs",
+		AclOperationAlterConfigs:    "AlterConfigs",
+		AclOperationIdempotentWrite: "IdempotentWrite",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclOperationUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclOperation (name without prefix)
+func (a *AclOperation) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
+func (a *AclOperation) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclOperation{
+		"unknown":         AclOperationUnknown,
+		"any":             AclOperationAny,
+		"all":             AclOperationAll,
+		"read":            AclOperationRead,
+		"write":           AclOperationWrite,
+		"create":          AclOperationCreate,
+		"delete":          AclOperationDelete,
+		"alter":           AclOperationAlter,
+		"describe":        AclOperationDescribe,
+		"clusteraction":   AclOperationClusterAction,
+		"describeconfigs": AclOperationDescribeConfigs,
+		"alterconfigs":    AclOperationAlterConfigs,
+		"idempotentwrite": AclOperationIdempotentWrite,
+	}
+	ao, ok := mapping[normalized]
+	if !ok {
+		*a = AclOperationUnknown
+		return fmt.Errorf("no acl operation with name %s", normalized)
+	}
+	*a = ao
+	return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+	AclPermissionUnknown AclPermissionType = iota
+	AclPermissionAny
+	AclPermissionDeny
+	AclPermissionAllow
+)
+
+func (a *AclPermissionType) String() string {
+	mapping := map[AclPermissionType]string{
+		AclPermissionUnknown: "Unknown",
+		AclPermissionAny:     "Any",
+		AclPermissionDeny:    "Deny",
+		AclPermissionAllow:   "Allow",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPermissionUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclPermissionType (name without prefix)
+func (a *AclPermissionType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType
+func (a *AclPermissionType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclPermissionType{
+		"unknown": AclPermissionUnknown,
+		"any":     AclPermissionAny,
+		"deny":    AclPermissionDeny,
+		"allow":   AclPermissionAllow,
+	}
+
+	apt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPermissionUnknown
+		return fmt.Errorf("no acl permission with name %s", normalized)
+	}
+	*a = apt
+	return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+	AclResourceUnknown AclResourceType = iota
+	AclResourceAny
+	AclResourceTopic
+	AclResourceGroup
+	AclResourceCluster
+	AclResourceTransactionalID
+	AclResourceDelegationToken
+)
+
+func (a *AclResourceType) String() string {
+	mapping := map[AclResourceType]string{
+		AclResourceUnknown:         "Unknown",
+		AclResourceAny:             "Any",
+		AclResourceTopic:           "Topic",
+		AclResourceGroup:           "Group",
+		AclResourceCluster:         "Cluster",
+		AclResourceTransactionalID: "TransactionalID",
+		AclResourceDelegationToken: "DelegationToken",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclResourceUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourceType (name without prefix)
+func (a *AclResourceType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType
+func (a *AclResourceType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourceType{
+		"unknown":         AclResourceUnknown,
+		"any":             AclResourceAny,
+		"topic":           AclResourceTopic,
+		"group":           AclResourceGroup,
+		"cluster":         AclResourceCluster,
+		"transactionalid": AclResourceTransactionalID,
+		"delegationtoken": AclResourceDelegationToken,
+	}
+
+	art, ok := mapping[normalized]
+	if !ok {
+		*a = AclResourceUnknown
+		return fmt.Errorf("no acl resource with name %s", normalized)
+	}
+	*a = art
+	return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
+const (
+	AclPatternUnknown AclResourcePatternType = iota
+	AclPatternAny
+	AclPatternMatch
+	AclPatternLiteral
+	AclPatternPrefixed
+)
+
+func (a *AclResourcePatternType) String() string {
+	mapping := map[AclResourcePatternType]string{
+		AclPatternUnknown:  "Unknown",
+		AclPatternAny:      "Any",
+		AclPatternMatch:    "Match",
+		AclPatternLiteral:  "Literal",
+		AclPatternPrefixed: "Prefixed",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPatternUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
+func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType
+func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourcePatternType{
+		"unknown":  AclPatternUnknown,
+		"any":      AclPatternAny,
+		"match":    AclPatternMatch,
+		"literal":  AclPatternLiteral,
+		"prefixed": AclPatternPrefixed,
+	}
+
+	arpt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPatternUnknown
+		return fmt.Errorf("no acl resource pattern with name %s", normalized)
+	}
+	*a = arpt
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 0000000..4361abb
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,75 @@
+package sarama
+
+// AddOffsetsToTxnRequest is a request to add offsets to a transaction
+type AddOffsetsToTxnRequest struct {
+	Version         int16
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	GroupID         string
+}
+
+func (a *AddOffsetsToTxnRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putString(a.GroupID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+	return apiKeyAddOffsetsToTxn
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+	return a.Version
+}
+
+func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AddOffsetsToTxnRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_7_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 0000000..17858a1
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,68 @@
+package sarama
+
+import (
+	"time"
+)
+
+// AddOffsetsToTxnResponse is a response type for adding offsets to txns
+type AddOffsetsToTxnResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (a *AddOffsetsToTxnResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+	pe.putKError(a.Err)
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	a.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+	return apiKeyAddOffsetsToTxn
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+	return a.Version
+}
+
+func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_7_0_0
+	}
+}
+
+func (r *AddOffsetsToTxnResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 0000000..492023b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,97 @@
+package sarama
+
+// AddPartitionsToTxnRequest is an add partitions request
+type AddPartitionsToTxnRequest struct {
+	Version         int16
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt64(a.ProducerID)
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range a.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.TopicPartitions = make(map[string][]int32)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		a.TopicPartitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+	return apiKeyAddPartitionsToTxn
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+	return a.Version
+}
+
+func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AddPartitionsToTxnRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 0000000..0384263
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,132 @@
+package sarama
+
+import (
+	"time"
+)
+
+// AddPartitionsToTxnResponse is the response to an add partitions request, carrying per-partition errors
+type AddPartitionsToTxnResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Errors       map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+	if err := pe.putArrayLength(len(a.Errors)); err != nil {
+		return err
+	}
+
+	for topic, e := range a.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	a.Version = version
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Errors = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		a.Errors[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			a.Errors[topic][j] = new(PartitionError)
+			if err := a.Errors[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+	return apiKeyAddPartitionsToTxn
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+	return a.Version
+}
+
+func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *AddPartitionsToTxnResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+// PartitionError is a partition error type
+type PartitionError struct {
+	Partition int32
+	Err       KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putKError(p.Err)
+	return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	p.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go
new file mode 100644
index 0000000..dda0201
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/admin.go
@@ -0,0 +1,1395 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"maps"
+	"math/rand"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
+// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
+// Methods with stricter requirements will specify the minimum broker version required.
+// You MUST call Close() on a client to avoid leaks
+type ClusterAdmin interface {
+	// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
+	// It may take several seconds after CreateTopic returns success for all the brokers
+	// to become aware that the topic has been created. During this time, listTopics
+	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
+	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
+
+	// List the topics available in the cluster with the default options.
+	ListTopics() (map[string]TopicDetail, error)
+
+	// Describe some topics in the cluster.
+	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
+
+	// Delete a topic. It may take several seconds after DeleteTopic returns success
+	// for all the brokers to become aware that the topic is gone.
+	// During this time, listTopics may continue to return information about the deleted topic.
+	// If delete.topic.enable is false on the brokers, deleteTopic will mark
+	// the topic for deletion, but not actually delete it.
+	// This operation is supported by brokers with version 0.10.1.0 or higher.
+	DeleteTopic(topic string) error
+
+	// Increase the number of partitions of the topic according to the corresponding values.
+	// If partitions are increased for a topic that has a key, the partition logic or ordering of
+	// the messages will be affected. It may take several seconds after this method returns
+	// success for all the brokers to become aware that the partitions have been created.
+	// During this time, ClusterAdmin#describeTopics may not return information about the
+	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
+	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
+
+	// Alter the replica assignment for partitions.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	AlterPartitionReassignments(topic string, assignment [][]int32) error
+
+	// Provides info on ongoing partition replica reassignments.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
+
+	// Delete records whose offset is smaller than the given offset of the corresponding partition.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
+
+	// Get the configuration for the specified resources.
+	// The returned configuration includes default values, and entries where Default is true
+	// can be distinguished from user-supplied values.
+	// Config entries where ReadOnly is true cannot be updated.
+	// The value of config entries where Sensitive is true is always nil so
+	// sensitive information is not disclosed.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
+
+	// Update the configuration for the specified resources with the default options.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	// The resources with their configs (topic is the only resource type with configs
+	// that can currently be updated). Updates are not transactional, so they may succeed
+	// for some resources while failing for others. The configs for a particular resource are updated atomically.
+	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
+
+	// IncrementalAlterConfig incrementally updates the configuration for the specified resources with the default options.
+	// This operation is supported by brokers with version 2.3.0.0 or higher.
+	// Updates are not transactional, so they may succeed for some resources while failing for others.
+	// The configs for a particular resource are updated atomically.
+	IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error
+
+	// Creates an access control list (ACL) which is bound to a specific resource.
+	// This operation is not transactional so it may succeed or fail.
+	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+	// Deprecated: Use CreateACLs instead.
+	CreateACL(resource Resource, acl Acl) error
+
+	// Creates access control lists (ACLs) which are bound to specific resources.
+	// This operation is not transactional so it may succeed for some ACLs while fail for others.
+	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+	CreateACLs([]*ResourceAcls) error
+
+	// Lists access control lists (ACLs) according to the supplied filter.
+	// It may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	ListAcls(filter AclFilter) ([]ResourceAcls, error)
+
+	// Deletes access control lists (ACLs) according to the supplied filters.
+	// This operation is not transactional so it may succeed for some ACLs while fail for others.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
+
+	// ElectLeaders allows triggering the election of preferred leaders for a set of partitions.
+	ElectLeaders(ElectionType, map[string][]int32) (map[string]map[int32]*PartitionResult, error)
+
+	// List the consumer groups available in the cluster.
+	ListConsumerGroups() (map[string]string, error)
+
+	// Describe the given consumer groups.
+	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
+
+	// List the consumer group offsets available in the cluster.
+	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
+
+	// Deletes a consumer group offset
+	DeleteConsumerGroupOffset(group string, topic string, partition int32) error
+
+	// Delete a consumer group.
+	DeleteConsumerGroup(group string) error
+
+	// Get information about the nodes in the cluster
+	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
+
+	// Get information about all log directories on the given set of brokers
+	DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
+
+	// Get information about SCRAM users
+	DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
+
+	// Delete SCRAM users
+	DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
+
+	// Upsert SCRAM users
+	UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
+
+	// Get client quota configurations corresponding to the specified filter.
+	// This operation is supported by brokers with version 2.6.0.0 or higher.
+	DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error)
+
+	// Alters client quota configurations with the specified alterations.
+	// This operation is supported by brokers with version 2.6.0.0 or higher.
+	AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error
+
+	// Controller returns the cluster controller broker. It will return a
+	// locally cached value if it's available.
+	Controller() (*Broker, error)
+
+	// Coordinator returns the coordinating broker for a consumer group. It will
+	// return a locally cached value if it's available.
+	Coordinator(group string) (*Broker, error)
+
+	// Remove members from the consumer group by given member identities.
+	// This operation is supported by brokers with version 2.3 or higher.
+	// This is for the static membership feature (KIP-345).
+	RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error)
+
+	// Close shuts down the admin and closes underlying client.
+	Close() error
+}
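+
+// A minimal ClusterAdmin usage sketch (illustrative only; the broker address and
+// topic name are placeholders, and cfg.Version must match your cluster):
+//
+//	cfg := NewConfig()
+//	cfg.Version = V2_4_0_0
+//	admin, err := NewClusterAdmin([]string{"localhost:9092"}, cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer admin.Close()
+//
+//	err = admin.CreateTopic("example", &TopicDetail{NumPartitions: 3, ReplicationFactor: 1}, false)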
+
+type clusterAdmin struct {
+	client Client
+	conf   *Config
+}
+
+// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
+func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+	admin, err := NewClusterAdminFromClient(client)
+	if err != nil {
+		client.Close()
+	}
+	return admin, err
+}
+
+// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
+// Note that the underlying client will also be closed on the admin's Close() call.
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
+	// make sure we can retrieve the controller
+	_, err := client.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	ca := &clusterAdmin{
+		client: client,
+		conf:   client.Config(),
+	}
+	return ca, nil
+}
+
+func (ca *clusterAdmin) Close() error {
+	return ca.client.Close()
+}
+
+func (ca *clusterAdmin) Controller() (*Broker, error) {
+	return ca.client.Controller()
+}
+
+func (ca *clusterAdmin) Coordinator(group string) (*Broker, error) {
+	return ca.client.Coordinator(group)
+}
+
+func (ca *clusterAdmin) refreshController() (*Broker, error) {
+	return ca.client.RefreshController()
+}
+
+// isRetriableControllerError returns `true` if the given error type unwraps to
+// an `ErrNotController` or `EOF` response from Kafka
+func isRetriableControllerError(err error) bool {
+	return errors.Is(err, ErrNotController) || errors.Is(err, io.EOF)
+}
+
+// isRetriableGroupCoordinatorError returns `true` if the given error type
+// unwraps to an `ErrNotCoordinatorForConsumer`,
+// `ErrConsumerCoordinatorNotAvailable` or `EOF` response from Kafka
+func isRetriableGroupCoordinatorError(err error) bool {
+	return errors.Is(err, ErrNotCoordinatorForConsumer) || errors.Is(err, ErrConsumerCoordinatorNotAvailable) || errors.Is(err, io.EOF)
+}
+
+// retryOnError will repeatedly call the given (error-returning) func in the
+// case that its response is non-nil and retryable (as determined by the
+// provided retryable func) up to the maximum number of tries permitted by
+// the admin client configuration
+func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error {
+	for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; {
+		err := fn()
+		attemptsRemaining--
+		if err == nil || attemptsRemaining <= 0 || !retryable(err) {
+			return err
+		}
+		Logger.Printf(
+			"admin/request retrying after %dms... (%d attempts remaining)\n",
+			ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining)
+		time.Sleep(ca.conf.Admin.Retry.Backoff)
+	}
+}
+
+func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	if detail == nil {
+		return errors.New("you must specify topic details")
+	}
+
+	topicDetails := map[string]*TopicDetail{
+		topic: detail,
+	}
+
+	request := NewCreateTopicsRequest(
+		ca.conf.Version,
+		topicDetails,
+		ca.conf.Admin.Timeout,
+		validateOnly,
+	)
+
+	return ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err := b.CreateTopics(request)
+		if err != nil {
+			return err
+		}
+
+		topicErr, ok := rsp.TopicErrors[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if !errors.Is(topicErr.Err, ErrNoError) {
+			if isRetriableControllerError(topicErr.Err) {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
+	var response *MetadataResponse
+	err = ca.retryOnError(isRetriableControllerError, func() error {
+		controller, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+		request := NewMetadataRequest(ca.conf.Version, topics)
+		response, err = controller.GetMetadata(request)
+		if isRetriableControllerError(err) {
+			_, _ = ca.refreshController()
+		}
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return response.Topics, nil
+}
+
+func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
+	var response *MetadataResponse
+	err = ca.retryOnError(isRetriableControllerError, func() error {
+		controller, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		request := NewMetadataRequest(ca.conf.Version, nil)
+		response, err = controller.GetMetadata(request)
+		if isRetriableControllerError(err) {
+			_, _ = ca.refreshController()
+		}
+		return err
+	})
+	if err != nil {
+		return nil, int32(0), err
+	}
+
+	return response.Brokers, response.ControllerID, nil
+}
+
+func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
+	brokers := ca.client.Brokers()
+	for _, b := range brokers {
+		if b.ID() == id {
+			return b, nil
+		}
+	}
+	return nil, fmt.Errorf("could not find broker id %d", id)
+}
+
+func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
+	brokers := ca.client.Brokers()
+	if len(brokers) > 0 {
+		index := rand.Intn(len(brokers))
+		return brokers[index], nil
+	}
+	return nil, errors.New("no available broker")
+}
+
+func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
+	// In order to build TopicDetails we need to first get the list of all
+	// topics using a MetadataRequest and then get their configs using a
+	// DescribeConfigsRequest request. To avoid sending many requests to the
+	// broker, we use a single DescribeConfigsRequest.
+
+	// Send the all-topic MetadataRequest
+	b, err := ca.findAnyBroker()
+	if err != nil {
+		return nil, err
+	}
+	_ = b.Open(ca.client.Config())
+
+	metadataReq := NewMetadataRequest(ca.conf.Version, nil)
+	metadataResp, err := b.GetMetadata(metadataReq)
+	if err != nil {
+		return nil, err
+	}
+
+	topicsDetailsMap := make(map[string]TopicDetail, len(metadataResp.Topics))
+
+	var describeConfigsResources []*ConfigResource
+
+	for _, topic := range metadataResp.Topics {
+		topicDetails := TopicDetail{
+			NumPartitions: int32(len(topic.Partitions)),
+		}
+		if len(topic.Partitions) > 0 {
+			topicDetails.ReplicaAssignment = make(map[int32][]int32, len(topic.Partitions))
+			for _, partition := range topic.Partitions {
+				topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
+			}
+			topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
+		}
+		topicsDetailsMap[topic.Name] = topicDetails
+
+		// we populate the resources we want to describe from the MetadataResponse
+		topicResource := ConfigResource{
+			Type: TopicResource,
+			Name: topic.Name,
+		}
+		describeConfigsResources = append(describeConfigsResources, &topicResource)
+	}
+
+	// Send the DescribeConfigsRequest
+	describeConfigsReq := &DescribeConfigsRequest{
+		Resources: describeConfigsResources,
+	}
+
+	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+		describeConfigsReq.Version = 1
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		describeConfigsReq.Version = 2
+	}
+
+	describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, resource := range describeConfigsResp.Resources {
+		topicDetails := topicsDetailsMap[resource.Name]
+		topicDetails.ConfigEntries = make(map[string]*string)
+
+		for _, entry := range resource.Configs {
+			// only include non-default non-sensitive config
+			// (don't actually think topic config will ever be sensitive)
+			if entry.Default || entry.Sensitive {
+				continue
+			}
+			topicDetails.ConfigEntries[entry.Name] = &entry.Value
+		}
+
+		topicsDetailsMap[resource.Name] = topicDetails
+	}
+
+	return topicsDetailsMap, nil
+}
+
+func (ca *clusterAdmin) DeleteTopic(topic string) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	request := &DeleteTopicsRequest{
+		Topics:  []string{topic},
+		Timeout: ca.conf.Admin.Timeout,
+	}
+
+	// Versions 0, 1, 2, and 3 are the same.
+	// Version 4 is first flexible version.
+	if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		request.Version = 4
+	} else if ca.conf.Version.IsAtLeast(V2_1_0_0) {
+		request.Version = 3
+	} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 2
+	} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+
+	return ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err := b.DeleteTopics(request)
+		if err != nil {
+			return err
+		}
+
+		topicErr, ok := rsp.TopicErrorCodes[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if !errors.Is(topicErr, ErrNoError) {
+			if errors.Is(topicErr, ErrNotController) {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	topicPartitions := map[string]*TopicPartition{
+		topic: {
+			Count:      count,
+			Assignment: assignment,
+		},
+	}
+
+	request := &CreatePartitionsRequest{
+		TopicPartitions: topicPartitions,
+		Timeout:         ca.conf.Admin.Timeout,
+		ValidateOnly:    validateOnly,
+	}
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	return ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err := b.CreatePartitions(request)
+		if err != nil {
+			return err
+		}
+
+		topicErr, ok := rsp.TopicPartitionErrors[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if !errors.Is(topicErr.Err, ErrNoError) {
+			if errors.Is(topicErr.Err, ErrNotController) {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	request := &AlterPartitionReassignmentsRequest{
+		TimeoutMs: int32(60000),
+		Version:   int16(0),
+	}
+
+	for i := 0; i < len(assignment); i++ {
+		request.AddBlock(topic, int32(i), assignment[i])
+	}
+
+	return ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		errs := make([]error, 0)
+
+		rsp, err := b.AlterPartitionReassignments(request)
+
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			if rsp.ErrorCode > 0 {
+				errs = append(errs, rsp.ErrorCode)
+			}
+
+			for topic, topicErrors := range rsp.Errors {
+				for partition, partitionError := range topicErrors {
+					if !errors.Is(partitionError.errorCode, ErrNoError) {
+						errs = append(errs, fmt.Errorf("[%s-%d]: %w", topic, partition, partitionError.errorCode))
+					}
+				}
+			}
+		}
+
+		if len(errs) > 0 {
+			return Wrap(ErrReassignPartitions, errs...)
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
+	if topic == "" {
+		return nil, ErrInvalidTopic
+	}
+
+	request := &ListPartitionReassignmentsRequest{
+		TimeoutMs: int32(60000),
+		Version:   int16(0),
+	}
+
+	request.AddBlock(topic, partitions)
+
+	var rsp *ListPartitionReassignmentsResponse
+	err = ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+		_ = b.Open(ca.client.Config())
+
+		rsp, err = b.ListPartitionReassignments(request)
+		if isRetriableControllerError(err) {
+			_, _ = ca.refreshController()
+		}
+		return err
+	})
+
+	if err != nil || rsp == nil {
+		return nil, err
+	}
+	return rsp.TopicStatus, nil
+}
+
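+// DeleteRecords deletes records of the given topic up to the supplied offset per
+// partition. Partitions are grouped by their leader broker, one request is sent per
+// broker, and any failures are wrapped into a single ErrDeleteRecords error.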
+func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+	errs := make([]error, 0)
+	partitionPerBroker := make(map[*Broker][]int32)
+	for partition := range partitionOffsets {
+		broker, err := ca.client.Leader(topic, partition)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
+	}
+	for broker, partitions := range partitionPerBroker {
+		recordsToDelete := make(map[int32]int64, len(partitions))
+		for _, p := range partitions {
+			recordsToDelete[p] = partitionOffsets[p]
+		}
+		topics := map[string]*DeleteRecordsRequestTopic{
+			topic: {
+				PartitionOffsets: recordsToDelete,
+			},
+		}
+		request := &DeleteRecordsRequest{
+			Topics:  topics,
+			Timeout: ca.conf.Admin.Timeout,
+		}
+		if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+			request.Version = 1
+		}
+		rsp, err := broker.DeleteRecords(request)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+
+		deleteRecordsResponseTopic, ok := rsp.Topics[topic]
+		if !ok {
+			errs = append(errs, ErrIncompleteResponse)
+			continue
+		}
+
+		for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
+			if !errors.Is(deleteRecordsResponsePartition.Err, ErrNoError) {
+				errs = append(errs, deleteRecordsResponsePartition.Err)
+				continue
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return Wrap(ErrDeleteRecords, errs...)
+	}
+	// TODO: since we are dealing with multiple partitions it would be better to return
+	// a slice of errors, one per partition, instead of a single error
+	return nil
+}
+
+// dependsOnSpecificNode returns whether the resource request needs to be sent to a
+// specific broker rather than to any broker.
+func dependsOnSpecificNode(resource ConfigResource) bool {
+	return (resource.Type == BrokerResource && resource.Name != "") ||
+		resource.Type == BrokerLoggerResource
+}
+
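+// DescribeConfig fetches the configuration entries of a single resource. Broker and
+// broker-logger resources are described via the broker in question; all other
+// resource types can be described via any broker.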
+func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
+	var entries []ConfigEntry
+	var resources []*ConfigResource
+	resources = append(resources, &resource)
+
+	request := &DescribeConfigsRequest{
+		Resources: resources,
+	}
+
+	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 1
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 2
+	}
+
+	var (
+		b   *Broker
+		err error
+	)
+
+	// DescribeConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(resource) {
+		var id int64
+		id, err = strconv.ParseInt(resource.Name, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	_ = b.Open(ca.client.Config())
+	rsp, err := b.DescribeConfigs(request)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == resource.Name {
+			if rspResource.ErrorCode != 0 {
+				return nil, &DescribeConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg}
+			}
+			for _, cfgEntry := range rspResource.Configs {
+				entries = append(entries, *cfgEntry)
+			}
+		}
+	}
+	return entries, nil
+}
+
+func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
+	var resources []*AlterConfigsResource
+	resources = append(resources, &AlterConfigsResource{
+		Type:          resourceType,
+		Name:          name,
+		ConfigEntries: entries,
+	})
+
+	request := &AlterConfigsRequest{
+		Resources:    resources,
+		ValidateOnly: validateOnly,
+	}
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	var (
+		b   *Broker
+		err error
+	)
+
+	// AlterConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
+		var id int64
+		id, err = strconv.ParseInt(name, 10, 32)
+		if err != nil {
+			return err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
+	if err != nil {
+		return err
+	}
+
+	_ = b.Open(ca.client.Config())
+	rsp, err := b.AlterConfigs(request)
+	if err != nil {
+		return err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == name {
+			if rspResource.ErrorCode != 0 {
+				return &AlterConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg}
+			}
+		}
+	}
+	return nil
+}
+
+func (ca *clusterAdmin) IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error {
+	var resources []*IncrementalAlterConfigsResource
+	resources = append(resources, &IncrementalAlterConfigsResource{
+		Type:          resourceType,
+		Name:          name,
+		ConfigEntries: entries,
+	})
+
+	request := &IncrementalAlterConfigsRequest{
+		Resources:    resources,
+		ValidateOnly: validateOnly,
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		request.Version = 1
+	}
+
+	var (
+		b   *Broker
+		err error
+	)
+
+	// IncrementalAlterConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
+		var id int64
+		id, err = strconv.ParseInt(name, 10, 32)
+		if err != nil {
+			return err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
+	if err != nil {
+		return err
+	}
+
+	_ = b.Open(ca.client.Config())
+	rsp, err := b.IncrementalAlterConfigs(request)
+	if err != nil {
+		return err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == name {
+			if rspResource.ErrorMsg != "" {
+				return errors.New(rspResource.ErrorMsg)
+			}
+			if rspResource.ErrorCode != 0 {
+				return KError(rspResource.ErrorCode)
+			}
+		}
+	}
+	return nil
+}
+
+func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
+	var acls []*AclCreation
+	acls = append(acls, &AclCreation{resource, acl})
+	request := &CreateAclsRequest{AclCreations: acls}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	_, err = b.CreateAcls(request)
+	return err
+}
+
+func (ca *clusterAdmin) CreateACLs(resourceACLs []*ResourceAcls) error {
+	var acls []*AclCreation
+	for _, resourceACL := range resourceACLs {
+		for _, acl := range resourceACL.Acls {
+			acls = append(acls, &AclCreation{resourceACL.Resource, *acl})
+		}
+	}
+	request := &CreateAclsRequest{AclCreations: acls}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	_, err = b.CreateAcls(request)
+	return err
+}
+
+func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
+	request := &DescribeAclsRequest{AclFilter: filter}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeAcls(request)
+	if err != nil {
+		return nil, err
+	}
+
+	var lAcls []ResourceAcls
+	for _, rAcl := range rsp.ResourceAcls {
+		lAcls = append(lAcls, *rAcl)
+	}
+	return lAcls, nil
+}
+
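+// DeleteACL removes the ACLs matching the given filter via the controller broker and
+// returns the matching ACLs that were deleted. Note that the validateOnly parameter
+// is currently ignored by this implementation.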
+func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
+	var filters []*AclFilter
+	filters = append(filters, &filter)
+	request := &DeleteAclsRequest{Filters: filters}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DeleteAcls(request)
+	if err != nil {
+		return nil, err
+	}
+
+	var mAcls []MatchingAcl
+	for _, fr := range rsp.FilterResponses {
+		for _, mACL := range fr.MatchingAcls {
+			mAcls = append(mAcls, *mACL)
+		}
+	}
+	return mAcls, nil
+}
+
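+// ElectLeaders triggers a leader election of the given type for the supplied topic
+// partitions via the controller and returns the per-partition election results.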
+func (ca *clusterAdmin) ElectLeaders(electionType ElectionType, partitions map[string][]int32) (map[string]map[int32]*PartitionResult, error) {
+	request := &ElectLeadersRequest{
+		Type:            electionType,
+		TopicPartitions: partitions,
+		TimeoutMs:       int32(60000),
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		request.Version = 2
+	} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+
+	var res *ElectLeadersResponse
+	if err := ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+		_ = b.Open(ca.client.Config())
+
+		res, err = b.ElectLeaders(request)
+		if err != nil {
+			return err
+		}
+		if !errors.Is(res.ErrorCode, ErrNoError) {
+			if isRetriableControllerError(res.ErrorCode) {
+				_, _ = ca.refreshController()
+			}
+			return res.ErrorCode
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return res.ReplicaElectionResults, nil
+}
+
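+// DescribeConsumerGroups describes the given consumer groups, batching the requests
+// by each group's coordinator broker.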
+func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
+	groupsPerBroker := make(map[*Broker][]string)
+
+	for _, group := range groups {
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return nil, err
+		}
+		groupsPerBroker[coordinator] = append(groupsPerBroker[coordinator], group)
+	}
+
+	for broker, brokerGroups := range groupsPerBroker {
+		describeReq := &DescribeGroupsRequest{
+			Groups: brokerGroups,
+		}
+
+		if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+			// Starting in version 4, the response will include group.instance.id info for members.
+			// Starting in version 5, the response uses flexible encoding.
+			describeReq.Version = 5
+		} else if ca.conf.Version.IsAtLeast(V2_3_0_0) {
+			// Starting in version 3, authorized operations can be requested.
+			describeReq.Version = 3
+		} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+			// Version 2 is the same as version 0.
+			describeReq.Version = 2
+		} else if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+			// Version 1 is the same as version 0.
+			describeReq.Version = 1
+		}
+		response, err := broker.DescribeGroups(describeReq)
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, response.Groups...)
+	}
+	return result, nil
+}
+
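+// ListConsumerGroups lists the groups known to the cluster by querying every broker
+// in parallel; the result maps group IDs to their protocol type.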
+func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
+	allGroups = make(map[string]string)
+
+	// Query brokers in parallel, since we have to query *all* brokers
+	brokers := ca.client.Brokers()
+	groupMaps := make(chan map[string]string, len(brokers))
+	errChan := make(chan error, len(brokers))
+	wg := sync.WaitGroup{}
+
+	for _, b := range brokers {
+		wg.Add(1)
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			request := &ListGroupsRequest{}
+			if ca.conf.Version.IsAtLeast(V3_8_0_0) {
+				// Version 5 adds the TypesFilter field (KIP-848).
+				request.Version = 5
+			} else if ca.conf.Version.IsAtLeast(V2_6_0_0) {
+				// Version 4 adds the StatesFilter field (KIP-518).
+				request.Version = 4
+			} else if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+				// Version 3 is the first flexible version.
+				request.Version = 3
+			} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+				// Version 2 is the same as version 0.
+				request.Version = 2
+			} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+				// Version 1 is the same as version 0.
+				request.Version = 1
+			}
+
+			response, err := b.ListGroups(request)
+			if err != nil {
+				errChan <- err
+				return
+			}
+
+			groupMaps <- maps.Clone(response.Groups)
+		}(b, ca.conf)
+	}
+
+	wg.Wait()
+	close(groupMaps)
+	close(errChan)
+
+	for groupMap := range groupMaps {
+		maps.Copy(allGroups, groupMap)
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errChan
+	return
+}
+
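+// ListConsumerGroupOffsets fetches the committed offsets of a consumer group for the
+// given topic partitions from the group coordinator, retrying on retriable
+// coordinator errors.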
+func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
+	var response *OffsetFetchResponse
+	request := NewOffsetFetchRequest(ca.conf.Version, group, topicPartitions)
+	err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+		defer func() {
+			if err != nil && isRetriableGroupCoordinatorError(err) {
+				_ = ca.client.RefreshCoordinator(group)
+			}
+		}()
+
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return err
+		}
+
+		response, err = coordinator.FetchOffset(request)
+		if err != nil {
+			return err
+		}
+		if !errors.Is(response.Err, ErrNoError) {
+			return response.Err
+		}
+
+		return nil
+	})
+
+	return response, err
+}
+
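+// DeleteConsumerGroupOffset deletes the committed offset of a consumer group for a
+// single topic partition via the group coordinator.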
+func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error {
+	var response *DeleteOffsetsResponse
+	request := &DeleteOffsetsRequest{
+		Group: group,
+		partitions: map[string][]int32{
+			topic: {partition},
+		},
+	}
+
+	return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+		defer func() {
+			if err != nil && isRetriableGroupCoordinatorError(err) {
+				_ = ca.client.RefreshCoordinator(group)
+			}
+		}()
+
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return err
+		}
+
+		response, err = coordinator.DeleteOffsets(request)
+		if err != nil {
+			return err
+		}
+		if !errors.Is(response.ErrorCode, ErrNoError) {
+			return response.ErrorCode
+		}
+		if !errors.Is(response.Errors[topic][partition], ErrNoError) {
+			return response.Errors[topic][partition]
+		}
+
+		return nil
+	})
+}
+
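+// DeleteConsumerGroup deletes the given consumer group via its coordinator, retrying
+// on retriable coordinator errors.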
+func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
+	var response *DeleteGroupsResponse
+	request := &DeleteGroupsRequest{
+		Groups: []string{group},
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		request.Version = 2
+	} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+		defer func() {
+			if err != nil && isRetriableGroupCoordinatorError(err) {
+				_ = ca.client.RefreshCoordinator(group)
+			}
+		}()
+
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return err
+		}
+
+		response, err = coordinator.DeleteGroups(request)
+		if err != nil {
+			return err
+		}
+
+		groupErr, ok := response.GroupErrorCodes[group]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if !errors.Is(groupErr, ErrNoError) {
+			return groupErr
+		}
+
+		return nil
+	})
+}
+
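+// DescribeLogDirs queries each of the given brokers in parallel for its log directory
+// metadata and returns the results keyed by broker ID.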
+func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
+	type result struct {
+		id      int32
+		logdirs []DescribeLogDirsResponseDirMetadata
+	}
+	// Query brokers in parallel, since we may have to query multiple brokers
+	logDirsResults := make(chan result, len(brokerIds))
+	errChan := make(chan error, len(brokerIds))
+	wg := sync.WaitGroup{}
+
+	for _, b := range brokerIds {
+		broker, err := ca.findBroker(b)
+		if err != nil {
+			Logger.Printf("Unable to find broker with ID = %v\n", b)
+			continue
+		}
+		wg.Add(1)
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			request := &DescribeLogDirsRequest{}
+			if ca.conf.Version.IsAtLeast(V3_3_0_0) {
+				request.Version = 4
+			} else if ca.conf.Version.IsAtLeast(V3_2_0_0) {
+				request.Version = 3
+			} else if ca.conf.Version.IsAtLeast(V2_6_0_0) {
+				request.Version = 2
+			} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+				request.Version = 1
+			}
+			response, err := b.DescribeLogDirs(request)
+			if err != nil {
+				errChan <- err
+				return
+			}
+			if !errors.Is(response.ErrorCode, ErrNoError) {
+				errChan <- response.ErrorCode
+				return
+			}
+			logDirsResults <- result{id: b.ID(), logdirs: response.LogDirs}
+		}(broker, ca.conf)
+	}
+
+	wg.Wait()
+	close(logDirsResults)
+	close(errChan)
+
+	allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
+	for logDirsResult := range logDirsResults {
+		allLogDirs[logDirsResult.id] = logDirsResult.logdirs
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errChan
+	return
+}
+
+func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
+	req := &DescribeUserScramCredentialsRequest{}
+	for _, u := range users {
+		req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
+			Name: u,
+		})
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeUserScramCredentials(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
+
+func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(upsert, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(nil, delete)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
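+// AlterUserScramCredentials applies the given SCRAM credential upsertions and
+// deletions in a single request sent to the controller.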
+func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	req := &AlterUserScramCredentialsRequest{
+		Deletions:  d,
+		Upsertions: u,
+	}
+
+	var rsp *AlterUserScramCredentialsResponse
+	err := ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err = b.AlterUserScramCredentials(req)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
+
+// DescribeClientQuotas returns the quota entries matching the given filter components.
+// Describe all: use an empty/nil components slice and strict = false
+// Contains components: strict = false
+// Contains only the given components: strict = true
+func (ca *clusterAdmin) DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) {
+	request := NewDescribeClientQuotasRequest(
+		ca.conf.Version,
+		components,
+		strict,
+	)
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeClientQuotas(request)
+	if err != nil {
+		return nil, err
+	}
+
+	if rsp.ErrorMsg != nil && len(*rsp.ErrorMsg) > 0 {
+		return nil, errors.New(*rsp.ErrorMsg)
+	}
+	if !errors.Is(rsp.ErrorCode, ErrNoError) {
+		return nil, rsp.ErrorCode
+	}
+
+	return rsp.Entries, nil
+}
+
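+// AlterClientQuotas applies a single quota alteration op to the given quota entity,
+// optionally only validating the change without applying it.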
+func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error {
+	entry := AlterClientQuotasEntry{
+		Entity: entity,
+		Ops:    []ClientQuotasOp{op},
+	}
+
+	request := &AlterClientQuotasRequest{
+		Entries:      []AlterClientQuotasEntry{entry},
+		ValidateOnly: validateOnly,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.AlterClientQuotas(request)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range rsp.Entries {
+		if entry.ErrorMsg != nil && len(*entry.ErrorMsg) > 0 {
+			return errors.New(*entry.ErrorMsg)
+		}
+		if !errors.Is(entry.ErrorCode, ErrNoError) {
+			return entry.ErrorCode
+		}
+	}
+
+	return nil
+}
+
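+// RemoveMemberFromConsumerGroup removes the static members identified by the given
+// group instance IDs from a consumer group by sending a LeaveGroup (v3) request to
+// the group coordinator. Requires Kafka v2.4.0 or newer.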
+func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(group string, groupInstanceIds []string) (*LeaveGroupResponse, error) {
+	if !ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		return nil, ConfigurationError("Removing members from a consumer group requires Kafka version of at least v2.4.0")
+	}
+	var response *LeaveGroupResponse
+	request := &LeaveGroupRequest{
+		Version: 3,
+		GroupId: group,
+	}
+	for _, instanceId := range groupInstanceIds {
+		groupInstanceId := instanceId
+		request.Members = append(request.Members, MemberIdentity{
+			GroupInstanceId: &groupInstanceId,
+		})
+	}
+	err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+		defer func() {
+			if err != nil && isRetriableGroupCoordinatorError(err) {
+				_ = ca.client.RefreshCoordinator(group)
+			}
+		}()
+
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return err
+		}
+
+		response, err = coordinator.LeaveGroup(request)
+		if err != nil {
+			return err
+		}
+		if !errors.Is(response.Err, ErrNoError) {
+			return response.Err
+		}
+
+		return nil
+	})
+
+	return response, err
+}
diff --git a/vendor/github.com/IBM/sarama/alter_client_quotas_request.go b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go
new file mode 100644
index 0000000..95554e3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go
@@ -0,0 +1,203 @@
+package sarama
+
+// AlterClientQuotas Request (Version: 0) => [entries] validate_only
+//   entries => [entity] [ops]
+//     entity => entity_type entity_name
+//       entity_type => STRING
+//       entity_name => NULLABLE_STRING
+//     ops => key value remove
+//       key => STRING
+//       value => FLOAT64
+//       remove => BOOLEAN
+//   validate_only => BOOLEAN
+
+type AlterClientQuotasRequest struct {
+	Version      int16
+	Entries      []AlterClientQuotasEntry // The quota configuration entries to alter.
+	ValidateOnly bool                     // Whether the alteration should be validated, but not performed.
+}
+
+func (a *AlterClientQuotasRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+type AlterClientQuotasEntry struct {
+	Entity []QuotaEntityComponent // The quota entity to alter.
+	Ops    []ClientQuotasOp       // The individual quota configuration entries to alter.
+}
+
+type ClientQuotasOp struct {
+	Key    string  // The quota configuration key.
+	Value  float64 // The value to set, otherwise ignored if the value is to be removed.
+	Remove bool    // Whether the quota configuration value should be removed, otherwise set.
+}
+
+func (a *AlterClientQuotasRequest) encode(pe packetEncoder) error {
+	// Entries
+	if err := pe.putArrayLength(len(a.Entries)); err != nil {
+		return err
+	}
+	for _, e := range a.Entries {
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	// ValidateOnly
+	pe.putBool(a.ValidateOnly)
+
+	return nil
+}
+
+func (a *AlterClientQuotasRequest) decode(pd packetDecoder, version int16) error {
+	// Entries
+	entryCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if entryCount > 0 {
+		a.Entries = make([]AlterClientQuotasEntry, entryCount)
+		for i := range a.Entries {
+			e := AlterClientQuotasEntry{}
+			if err = e.decode(pd, version); err != nil {
+				return err
+			}
+			a.Entries[i] = e
+		}
+	} else {
+		a.Entries = []AlterClientQuotasEntry{}
+	}
+
+	// ValidateOnly
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	a.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (a *AlterClientQuotasEntry) encode(pe packetEncoder) error {
+	// Entity
+	if err := pe.putArrayLength(len(a.Entity)); err != nil {
+		return err
+	}
+	for _, component := range a.Entity {
+		if err := component.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	// Ops
+	if err := pe.putArrayLength(len(a.Ops)); err != nil {
+		return err
+	}
+	for _, o := range a.Ops {
+		if err := o.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasEntry) decode(pd packetDecoder, version int16) error {
+	// Entity
+	componentCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if componentCount > 0 {
+		a.Entity = make([]QuotaEntityComponent, componentCount)
+		for i := 0; i < componentCount; i++ {
+			component := QuotaEntityComponent{}
+			if err := component.decode(pd, version); err != nil {
+				return err
+			}
+			a.Entity[i] = component
+		}
+	} else {
+		a.Entity = []QuotaEntityComponent{}
+	}
+
+	// Ops
+	opCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if opCount > 0 {
+		a.Ops = make([]ClientQuotasOp, opCount)
+		for i := range a.Ops {
+			c := ClientQuotasOp{}
+			if err = c.decode(pd, version); err != nil {
+				return err
+			}
+			a.Ops[i] = c
+		}
+	} else {
+		a.Ops = []ClientQuotasOp{}
+	}
+
+	return nil
+}
+
+func (c *ClientQuotasOp) encode(pe packetEncoder) error {
+	// Key
+	if err := pe.putString(c.Key); err != nil {
+		return err
+	}
+
+	// Value
+	pe.putFloat64(c.Value)
+
+	// Remove
+	pe.putBool(c.Remove)
+
+	return nil
+}
+
+func (c *ClientQuotasOp) decode(pd packetDecoder, version int16) error {
+	// Key
+	key, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.Key = key
+
+	// Value
+	value, err := pd.getFloat64()
+	if err != nil {
+		return err
+	}
+	c.Value = value
+
+	// Remove
+	remove, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	c.Remove = remove
+
+	return nil
+}
+
+func (a *AlterClientQuotasRequest) key() int16 {
+	return apiKeyAlterClientQuotas
+}
+
+func (a *AlterClientQuotasRequest) version() int16 {
+	return a.Version
+}
+
+func (a *AlterClientQuotasRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AlterClientQuotasRequest) isValidVersion() bool {
+	return a.Version == 0
+}
+
+func (a *AlterClientQuotasRequest) requiredVersion() KafkaVersion {
+	return V2_6_0_0
+}
diff --git a/vendor/github.com/IBM/sarama/alter_client_quotas_response.go b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go
new file mode 100644
index 0000000..2ca4974
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+	"time"
+)
+
+// AlterClientQuotas Response (Version: 0) => throttle_time_ms [entries]
+//   throttle_time_ms => INT32
+//   entries => error_code error_message [entity]
+//     error_code => INT16
+//     error_message => NULLABLE_STRING
+//     entity => entity_type entity_name
+//       entity_type => STRING
+//       entity_name => NULLABLE_STRING
+
+type AlterClientQuotasResponse struct {
+	Version      int16
+	ThrottleTime time.Duration                    // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+	Entries      []AlterClientQuotasEntryResponse // The quota configuration entries altered.
+}
+
+func (a *AlterClientQuotasResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+type AlterClientQuotasEntryResponse struct {
+	ErrorCode KError                 // The error code, or `0` if the quota alteration succeeded.
+	ErrorMsg  *string                // The error message, or `null` if the quota alteration succeeded.
+	Entity    []QuotaEntityComponent // The quota entity altered.
+}
+
+func (a *AlterClientQuotasResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+
+	// Entries
+	if err := pe.putArrayLength(len(a.Entries)); err != nil {
+		return err
+	}
+	for _, e := range a.Entries {
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasResponse) decode(pd packetDecoder, version int16) (err error) {
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	// Entries
+	entryCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if entryCount > 0 {
+		a.Entries = make([]AlterClientQuotasEntryResponse, entryCount)
+		for i := range a.Entries {
+			e := AlterClientQuotasEntryResponse{}
+			if err = e.decode(pd, version); err != nil {
+				return err
+			}
+			a.Entries[i] = e
+		}
+	} else {
+		a.Entries = []AlterClientQuotasEntryResponse{}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasEntryResponse) encode(pe packetEncoder) error {
+	// ErrorCode
+	pe.putKError(a.ErrorCode)
+
+	// ErrorMsg
+	if err := pe.putNullableString(a.ErrorMsg); err != nil {
+		return err
+	}
+
+	// Entity
+	if err := pe.putArrayLength(len(a.Entity)); err != nil {
+		return err
+	}
+	for _, component := range a.Entity {
+		if err := component.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasEntryResponse) decode(pd packetDecoder, version int16) (err error) {
+	// ErrorCode
+	a.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	// ErrorMsg
+	errMsg, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	a.ErrorMsg = errMsg
+
+	// Entity
+	componentCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if componentCount > 0 {
+		a.Entity = make([]QuotaEntityComponent, componentCount)
+		for i := 0; i < componentCount; i++ {
+			component := QuotaEntityComponent{}
+			if err := component.decode(pd, version); err != nil {
+				return err
+			}
+			a.Entity[i] = component
+		}
+	} else {
+		a.Entity = []QuotaEntityComponent{}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasResponse) key() int16 {
+	return apiKeyAlterClientQuotas
+}
+
+func (a *AlterClientQuotasResponse) version() int16 {
+	return a.Version
+}
+
+func (a *AlterClientQuotasResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AlterClientQuotasResponse) isValidVersion() bool {
+	return a.Version == 0
+}
+
+func (a *AlterClientQuotasResponse) requiredVersion() KafkaVersion {
+	return V2_6_0_0
+}
+
+func (r *AlterClientQuotasResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/alter_configs_request.go b/vendor/github.com/IBM/sarama/alter_configs_request.go
new file mode 100644
index 0000000..5293b49
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_configs_request.go
@@ -0,0 +1,142 @@
+package sarama
+
+// AlterConfigsRequest is an alter config request type
+type AlterConfigsRequest struct {
+	Version      int16
+	Resources    []*AlterConfigsResource
+	ValidateOnly bool
+}
+
+func (a *AlterConfigsRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+// AlterConfigsResource is an alter config resource type
+type AlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]*string
+}
+
+func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range a.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(a.ValidateOnly)
+	return nil
+}
+
+func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResource, resourceCount)
+	for i := range a.Resources {
+		r := &AlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		a.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	a.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (a *AlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
+
+	if err := pe.putString(a.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range a.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		a.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (a *AlterConfigsRequest) key() int16 {
+	return apiKeyAlterConfigs
+}
+
+func (a *AlterConfigsRequest) version() int16 {
+	return a.Version
+}
+
+func (a *AlterConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AlterConfigsRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_0_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/alter_configs_response.go b/vendor/github.com/IBM/sarama/alter_configs_response.go
new file mode 100644
index 0000000..cde2686
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_configs_response.go
@@ -0,0 +1,156 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+// AlterConfigsResponse is a response type for alter config
+type AlterConfigsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+func (a *AlterConfigsResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+type AlterConfigError struct {
+	Err    KError
+	ErrMsg string
+}
+
+func (c *AlterConfigError) Error() string {
+	text := c.Err.Error()
+	if c.ErrMsg != "" {
+		text = fmt.Sprintf("%s - %s", text, c.ErrMsg)
+	}
+	return text
+}
+
+// AlterConfigsResourceResponse is a response type for alter config resource
+type AlterConfigsResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+}
+
+func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		if err := a.Resources[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
+	pe.putInt16(a.ErrorCode)
+	err := pe.putString(a.ErrorMsg)
+	if err != nil {
+		return err
+	}
+	pe.putInt8(int8(a.Type))
+	err = pe.putString(a.Name)
+	if err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
+	errCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.ErrorCode = errCode
+
+	e, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	if e == nil {
+		a.ErrorMsg = ""
+	} else {
+		a.ErrorMsg = *e
+	}
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *AlterConfigsResponse) key() int16 {
+	return apiKeyAlterConfigs
+}
+
+func (a *AlterConfigsResponse) version() int16 {
+	return a.Version
+}
+
+func (a *AlterConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AlterConfigsResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *AlterConfigsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go
new file mode 100644
index 0000000..165cbcc
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go
@@ -0,0 +1,146 @@
+package sarama
+
+type alterPartitionReassignmentsBlock struct {
+	replicas []int32
+}
+
+func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
+	if err := pe.putNullableInt32Array(b.replicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
+	if b.replicas, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type AlterPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string]map[int32]*alterPartitionReassignmentsBlock
+	Version   int16
+}
+
+func (r *AlterPartitionReassignmentsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				block := &alterPartitionReassignmentsBlock{}
+				if err := block.decode(pd); err != nil {
+					return err
+				}
+				r.blocks[topic][partition] = block
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *AlterPartitionReassignmentsRequest) key() int16 {
+	return apiKeyAlterPartitionReassignments
+}
+
+func (r *AlterPartitionReassignmentsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterPartitionReassignmentsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *AlterPartitionReassignmentsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterPartitionReassignmentsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+	}
+
+	r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
+}
diff --git a/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go
new file mode 100644
index 0000000..886c4f6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go
@@ -0,0 +1,178 @@
+package sarama
+
+import "time"
+
+type alterPartitionReassignmentsErrorBlock struct {
+	errorCode    KError
+	errorMessage *string
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
+	pe.putKError(b.errorCode)
+	if err := pe.putNullableString(b.errorMessage); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
+	b.errorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+	b.errorMessage, err = pd.getNullableString()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type AlterPartitionReassignmentsResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	ErrorCode      KError
+	ErrorMessage   *string
+	Errors         map[string]map[int32]*alterPartitionReassignmentsErrorBlock
+}
+
+func (r *AlterPartitionReassignmentsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
+		r.Errors[topic] = partitions
+	}
+
+	partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
+}
+
+func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+	pe.putKError(r.ErrorCode)
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numTopics > 0 {
+		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
+		for i := 0; i < numTopics; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			ongoingPartitionReassignments, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
+
+			for j := 0; j < ongoingPartitionReassignments; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				block := &alterPartitionReassignmentsErrorBlock{}
+				if err := block.decode(pd); err != nil {
+					return err
+				}
+
+				r.Errors[topic][partition] = block
+			}
+			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *AlterPartitionReassignmentsResponse) key() int16 {
+	return apiKeyAlterPartitionReassignments
+}
+
+func (r *AlterPartitionReassignmentsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *AlterPartitionReassignmentsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *AlterPartitionReassignmentsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterPartitionReassignmentsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *AlterPartitionReassignmentsResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go
new file mode 100644
index 0000000..d50a38d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go
@@ -0,0 +1,160 @@
+package sarama
+
+type AlterUserScramCredentialsRequest struct {
+	Version int16
+
+	// Deletions represent list of SCRAM credentials to remove
+	Deletions []AlterUserScramCredentialsDelete
+
+	// Upsertions represent list of SCRAM credentials to update/insert
+	Upsertions []AlterUserScramCredentialsUpsert
+}
+
+func (r *AlterUserScramCredentialsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+type AlterUserScramCredentialsDelete struct {
+	Name      string
+	Mechanism ScramMechanismType
+}
+
+type AlterUserScramCredentialsUpsert struct {
+	Name           string
+	Mechanism      ScramMechanismType
+	Iterations     int32
+	Salt           []byte
+	saltedPassword []byte
+
+	// This field is never transmitted over the wire
+	// @see: https://tools.ietf.org/html/rfc5802
+	Password []byte
+}
+
+func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Deletions)); err != nil {
+		return err
+	}
+	for _, d := range r.Deletions {
+		if err := pe.putString(d.Name); err != nil {
+			return err
+		}
+		pe.putInt8(int8(d.Mechanism))
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	if err := pe.putArrayLength(len(r.Upsertions)); err != nil {
+		return err
+	}
+	for _, u := range r.Upsertions {
+		if err := pe.putString(u.Name); err != nil {
+			return err
+		}
+		pe.putInt8(int8(u.Mechanism))
+		pe.putInt32(u.Iterations)
+
+		if err := pe.putBytes(u.Salt); err != nil {
+			return err
+		}
+
+		// do not transmit the password over the wire
+		formatter := scramFormatter{mechanism: u.Mechanism}
+		salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations))
+		if err != nil {
+			return err
+		}
+
+		if err := pe.putBytes(salted); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+	numDeletions, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions)
+	for i := 0; i < numDeletions; i++ {
+		r.Deletions[i] = AlterUserScramCredentialsDelete{}
+		if r.Deletions[i].Name, err = pd.getString(); err != nil {
+			return err
+		}
+		mechanism, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Deletions[i].Mechanism = ScramMechanismType(mechanism)
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	numUpsertions, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions)
+	for i := 0; i < numUpsertions; i++ {
+		r.Upsertions[i] = AlterUserScramCredentialsUpsert{}
+		if r.Upsertions[i].Name, err = pd.getString(); err != nil {
+			return err
+		}
+		mechanism, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		r.Upsertions[i].Mechanism = ScramMechanismType(mechanism)
+		if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.Upsertions[i].Salt, err = pd.getBytes(); err != nil {
+			return err
+		}
+		if r.Upsertions[i].saltedPassword, err = pd.getBytes(); err != nil {
+			return err
+		}
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *AlterUserScramCredentialsRequest) key() int16 {
+	return apiKeyAlterUserScramCredentials
+}
+
+func (r *AlterUserScramCredentialsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *AlterUserScramCredentialsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterUserScramCredentialsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *AlterUserScramCredentialsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterUserScramCredentialsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
diff --git a/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go
new file mode 100644
index 0000000..229ed54
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go
@@ -0,0 +1,111 @@
+package sarama
+
+import "time"
+
+type AlterUserScramCredentialsResponse struct {
+	Version int16
+
+	ThrottleTime time.Duration
+
+	Results []*AlterUserScramCredentialsResult
+}
+
+func (r *AlterUserScramCredentialsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type AlterUserScramCredentialsResult struct {
+	User string
+
+	ErrorCode    KError
+	ErrorMessage *string
+}
+
+func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(r.ThrottleTime)
+	if err := pe.putArrayLength(len(r.Results)); err != nil {
+		return err
+	}
+
+	for _, u := range r.Results {
+		if err := pe.putString(u.User); err != nil {
+			return err
+		}
+		pe.putKError(u.ErrorCode)
+		if err := pe.putNullableString(u.ErrorMessage); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	numResults, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numResults > 0 {
+		r.Results = make([]*AlterUserScramCredentialsResult, numResults)
+		for i := 0; i < numResults; i++ {
+			r.Results[i] = &AlterUserScramCredentialsResult{}
+			if r.Results[i].User, err = pd.getString(); err != nil {
+				return err
+			}
+
+			r.Results[i].ErrorCode, err = pd.getKError()
+			if err != nil {
+				return err
+			}
+
+			if r.Results[i].ErrorMessage, err = pd.getNullableString(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *AlterUserScramCredentialsResponse) key() int16 {
+	return apiKeyAlterUserScramCredentials
+}
+
+func (r *AlterUserScramCredentialsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *AlterUserScramCredentialsResponse) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterUserScramCredentialsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *AlterUserScramCredentialsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterUserScramCredentialsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
+
+func (r *AlterUserScramCredentialsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/api_versions.go b/vendor/github.com/IBM/sarama/api_versions.go
new file mode 100644
index 0000000..e993432
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions.go
@@ -0,0 +1,84 @@
+package sarama
+
+type apiVersionRange struct {
+	minVersion int16
+	maxVersion int16
+}
+
+type apiVersionMap map[int16]*apiVersionRange
+
+// restrictApiVersion selects the appropriate API version for a given protocol body according to
+// the client and broker version ranges. By default, it selects the maximum version supported by both
+// client and broker, capped by the maximum Kafka version from Config.
+// It then calls setVersion() on the protocol body.
+// If no valid version is found, an error is returned.
+func restrictApiVersion(pb protocolBody, brokerVersions apiVersionMap) error {
+	key := pb.key()
+	// Since message constructors take a Kafka version and select the maximum supported protocol version already, we can
+	// rely on pb.version() being the max version supported for the user-selected Kafka API version.
+	clientMax := pb.version()
+
+	if brokerVersionRange := brokerVersions[key]; brokerVersionRange != nil {
+		// Select the maximum version that both client and server support
+		// Clamp to the client max to respect user preference above broker advertised version range
+		pb.setVersion(min(clientMax, max(min(clientMax, brokerVersionRange.maxVersion), brokerVersionRange.minVersion)))
+		return nil
+	}
+
+	return nil // no version ranges available, no restriction
+}
+
+const (
+	apiKeyProduce                      = 0
+	apiKeyFetch                        = 1
+	apiKeyListOffsets                  = 2
+	apiKeyMetadata                     = 3
+	apiKeyLeaderAndIsr                 = 4
+	apiKeyStopReplica                  = 5
+	apiKeyUpdateMetadata               = 6
+	apiKeyControlledShutdown           = 7
+	apiKeyOffsetCommit                 = 8
+	apiKeyOffsetFetch                  = 9
+	apiKeyFindCoordinator              = 10
+	apiKeyJoinGroup                    = 11
+	apiKeyHeartbeat                    = 12
+	apiKeyLeaveGroup                   = 13
+	apiKeySyncGroup                    = 14
+	apiKeyDescribeGroups               = 15
+	apiKeyListGroups                   = 16
+	apiKeySaslHandshake                = 17
+	apiKeyApiVersions                  = 18
+	apiKeyCreateTopics                 = 19
+	apiKeyDeleteTopics                 = 20
+	apiKeyDeleteRecords                = 21
+	apiKeyInitProducerId               = 22
+	apiKeyOffsetForLeaderEpoch         = 23
+	apiKeyAddPartitionsToTxn           = 24
+	apiKeyAddOffsetsToTxn              = 25
+	apiKeyEndTxn                       = 26
+	apiKeyWriteTxnMarkers              = 27
+	apiKeyTxnOffsetCommit              = 28
+	apiKeyDescribeAcls                 = 29
+	apiKeyCreateAcls                   = 30
+	apiKeyDeleteAcls                   = 31
+	apiKeyDescribeConfigs              = 32
+	apiKeyAlterConfigs                 = 33
+	apiKeyAlterReplicaLogDirs          = 34
+	apiKeyDescribeLogDirs              = 35
+	apiKeySASLAuth                     = 36
+	apiKeyCreatePartitions             = 37
+	apiKeyCreateDelegationToken        = 38
+	apiKeyRenewDelegationToken         = 39
+	apiKeyExpireDelegationToken        = 40
+	apiKeyDescribeDelegationToken      = 41
+	apiKeyDeleteGroups                 = 42
+	apiKeyElectLeaders                 = 43
+	apiKeyIncrementalAlterConfigs      = 44
+	apiKeyAlterPartitionReassignments  = 45
+	apiKeyListPartitionReassignments   = 46
+	apiKeyOffsetDelete                 = 47
+	apiKeyDescribeClientQuotas         = 48
+	apiKeyAlterClientQuotas            = 49
+	apiKeyDescribeUserScramCredentials = 50
+	apiKeyAlterUserScramCredentials    = 51
+)
diff --git a/vendor/github.com/IBM/sarama/api_versions_request.go b/vendor/github.com/IBM/sarama/api_versions_request.go
new file mode 100644
index 0000000..593b0f0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions_request.go
@@ -0,0 +1,87 @@
+package sarama
+
+const defaultClientSoftwareName = "sarama"
+
+type ApiVersionsRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ClientSoftwareName contains the name of the client.
+	ClientSoftwareName string
+	// ClientSoftwareVersion contains the version of the client.
+	ClientSoftwareVersion string
+}
+
+func (r *ApiVersionsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) (err error) {
+	if r.Version >= 3 {
+		if err := pe.putString(r.ClientSoftwareName); err != nil {
+			return err
+		}
+		if err := pe.putString(r.ClientSoftwareVersion); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 3 {
+		if r.ClientSoftwareName, err = pd.getString(); err != nil {
+			return err
+		}
+		if r.ClientSoftwareVersion, err = pd.getString(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+	return apiKeyApiVersions
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ApiVersionsRequest) headerVersion() int16 {
+	if r.Version >= 3 {
+		return 2
+	}
+	return 1
+}
+
+func (r *ApiVersionsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 3
+}
+
+func (r *ApiVersionsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ApiVersionsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 3
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_4_0_0
+	}
+}
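The requiredVersion mapping above ties each ApiVersionsRequest protocol version to the minimum Kafka release that can serve it. As a hedged illustration (not part of the vendored file), a caller could invert that mapping to pick the highest request version a configured cluster supports; the helper name and the printed value are assumptions for this sketch only.

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

// apiVersionsRequestVersionFor is a hypothetical helper mirroring the
// requiredVersion mapping above: it returns the highest ApiVersionsRequest
// version that the configured cluster version can serve.
func apiVersionsRequestVersionFor(v sarama.KafkaVersion) int16 {
	switch {
	case v.IsAtLeast(sarama.V2_4_0_0):
		return 3 // flexible encoding; carries client software name/version
	case v.IsAtLeast(sarama.V2_0_0_0):
		return 2
	case v.IsAtLeast(sarama.V0_11_0_0):
		return 1
	default:
		return 0
	}
}

func main() {
	fmt.Println(apiVersionsRequestVersionFor(sarama.V2_1_0_0)) // prints 2
}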
diff --git a/vendor/github.com/IBM/sarama/api_versions_response.go b/vendor/github.com/IBM/sarama/api_versions_response.go
new file mode 100644
index 0000000..3f3c11a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions_response.go
@@ -0,0 +1,171 @@
+package sarama
+
+import (
+	"time"
+)
+
+// ApiVersionsResponseKey contains the APIs supported by the broker.
+type ApiVersionsResponseKey struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ApiKey contains the API index.
+	ApiKey int16
+	// MinVersion contains the minimum supported version, inclusive.
+	MinVersion int16
+	// MaxVersion contains the maximum supported version, inclusive.
+	MaxVersion int16
+}
+
+func (a *ApiVersionsResponseKey) encode(pe packetEncoder, version int16) (err error) {
+	a.Version = version
+	pe.putInt16(a.ApiKey)
+
+	pe.putInt16(a.MinVersion)
+
+	pe.putInt16(a.MaxVersion)
+
+	if version >= 3 {
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	return nil
+}
+
+func (a *ApiVersionsResponseKey) decode(pd packetDecoder, version int16) (err error) {
+	a.Version = version
+	if a.ApiKey, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if a.MinVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if a.MaxVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type ApiVersionsResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ErrorCode contains the top-level error code.
+	ErrorCode int16
+	// ApiKeys contains the APIs supported by the broker.
+	ApiKeys []ApiVersionsResponseKey
+	// ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+	ThrottleTimeMs int32
+}
+
+func (r *ApiVersionsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) (err error) {
+	pe.putInt16(r.ErrorCode)
+
+	if err := pe.putArrayLength(len(r.ApiKeys)); err != nil {
+		return err
+	}
+	for _, block := range r.ApiKeys {
+		if err := block.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	if r.Version >= 3 {
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ErrorCode, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	// KIP-511: if the broker didn't understand the ApiVersionsRequest version, it
+	// replies with a v0 non-flexible ApiVersionsResponse whose supported
+	// ApiVersionsRequest version is available in ApiKeys
+	if r.ErrorCode == int16(ErrUnsupportedVersion) {
+		// drop the version to 0 and revert the packetDecoder to non-flexible for the remaining decoding
+		r.Version = 0
+		pd = downgradeFlexibleDecoder(pd)
+	}
+
+	numApiKeys, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	r.ApiKeys = make([]ApiVersionsResponseKey, numApiKeys)
+	for i := 0; i < numApiKeys; i++ {
+		var block ApiVersionsResponseKey
+		if err = block.decode(pd, r.Version); err != nil {
+			return err
+		}
+		r.ApiKeys[i] = block
+	}
+
+	if r.Version >= 1 {
+		if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+	return apiKeyApiVersions
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ApiVersionsResponse) headerVersion() int16 {
+	// ApiVersionsResponse always includes a v0 header.
+	// See KIP-511 for details
+	return 0
+}
+
+func (r *ApiVersionsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 3
+}
+
+func (r *ApiVersionsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ApiVersionsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 3
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *ApiVersionsResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
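As a hedged sketch (not part of the vendored file), the exported ApiVersionsResponse fields above can be scanned to check whether a broker advertises support for a particular API key and version; the supports helper and the literal key range are assumptions for illustration only.

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

// supports is a hypothetical helper: it reports whether a decoded
// ApiVersionsResponse advertises the given API key at the given version.
func supports(resp *sarama.ApiVersionsResponse, apiKey, version int16) bool {
	for _, k := range resp.ApiKeys {
		if k.ApiKey == apiKey {
			return k.MinVersion <= version && version <= k.MaxVersion
		}
	}
	return false
}

func main() {
	// Assumed response contents for illustration: Produce (key 0) up to v9.
	resp := &sarama.ApiVersionsResponse{
		ApiKeys: []sarama.ApiVersionsResponseKey{{ApiKey: 0, MinVersion: 0, MaxVersion: 9}},
	}
	fmt.Println(supports(resp, 0, 7)) // true
}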
diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go
new file mode 100644
index 0000000..e34eed5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/async_producer.go
@@ -0,0 +1,1427 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"time"
+
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/eapache/queue"
+	"github.com/rcrowley/go-metrics"
+)
+
+// ErrProducerRetryBufferOverflow is returned when the bridging retry buffer is full and OOM prevention needs to be applied.
+var ErrProducerRetryBufferOverflow = errors.New("retry buffer full: message discarded to prevent buffer overflow")
+
+const (
+	// minFunctionalRetryBufferLength defines the minimum number of messages the retry buffer must support.
+	// If Producer.Retry.MaxBufferLength is set to a non-zero value below this limit, it will be adjusted to this value.
+	// This ensures the retry buffer remains functional under typical workloads.
+	minFunctionalRetryBufferLength = 4 * 1024
+	// minFunctionalRetryBufferBytes defines the minimum total byte size the retry buffer must support.
+	// If Producer.Retry.MaxBufferBytes is set to a non-zero value below this limit, it will be adjusted to this value.
+	// A 32 MB lower limit ensures sufficient capacity for retrying larger messages without exhausting resources.
+	minFunctionalRetryBufferBytes = 32 * 1024 * 1024
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks and message loss: it will not be garbage-collected automatically when it passes
+// out of scope and buffered messages may not be flushed.
+type AsyncProducer interface {
+	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
+	// when both the Errors and Successes channels have been closed. When calling
+	// AsyncClose, you *must* continue to read from those channels in order to
+	// drain the results of any messages in flight.
+	AsyncClose()
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before the
+	// process shuts down, or you may lose messages. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+
+	// Input is the input channel to which the user writes the messages they
+	// wish to send.
+	Input() chan<- *ProducerMessage
+
+	// Successes is the success output channel back to the user when Return.Successes is
+	// enabled. If Return.Successes is true, you MUST read from this channel or the
+	// Producer will deadlock. It is suggested that you send and read messages
+	// together in a single select statement.
+	Successes() <-chan *ProducerMessage
+
+	// Errors is the error output channel back to the user. You MUST read from this
+	// channel or the Producer will deadlock when the channel is full. Alternatively,
+	// you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
+	Errors() <-chan *ProducerError
+
+	// IsTransactional returns true when the current producer is transactional.
+	IsTransactional() bool
+
+	// TxnStatus returns the current producer transaction status.
+	TxnStatus() ProducerTxnStatusFlag
+
+	// BeginTxn marks the current transaction as ready.
+	BeginTxn() error
+
+	// CommitTxn commits the current transaction.
+	CommitTxn() error
+
+	// AbortTxn aborts the current transaction.
+	AbortTxn() error
+
+	// AddOffsetsToTxn adds the associated offsets to the current transaction.
+	AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error
+
+	// AddMessageToTxn adds the message's offset to the current transaction.
+	AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error
+}
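A minimal usage sketch (not part of the vendored file) of the interface documented above: it sends a single message while reading from Successes and Errors, as the comments require. The broker address "localhost:9092" and the topic "example-topic" are placeholders, not values from the source.

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true

	// "localhost:9092" and "example-topic" are placeholders for this sketch.
	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = producer.Close() }()

	producer.Input() <- &sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	}

	// Real code would loop over both channels in a single select, as advised above.
	select {
	case msg := <-producer.Successes():
		log.Printf("delivered to partition %d at offset %d", msg.Partition, msg.Offset)
	case perr := <-producer.Errors():
		log.Printf("delivery failed: %v", perr.Err)
	}
}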
+
+type asyncProducer struct {
+	client Client
+	conf   *Config
+
+	errors                    chan *ProducerError
+	input, successes, retries chan *ProducerMessage
+	inFlight                  sync.WaitGroup
+
+	brokers    map[*Broker]*brokerProducer
+	brokerRefs map[*brokerProducer]int
+	brokerLock sync.Mutex
+
+	txnmgr *transactionManager
+	txLock sync.Mutex
+
+	metricsRegistry metrics.Registry
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+	return newAsyncProducer(client)
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+	// For clients passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newAsyncProducer(cli)
+}
+
+func newAsyncProducer(client Client) (AsyncProducer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	txnmgr, err := newTransactionManager(client.Config(), client)
+	if err != nil {
+		return nil, err
+	}
+
+	p := &asyncProducer{
+		client:          client,
+		conf:            client.Config(),
+		errors:          make(chan *ProducerError),
+		input:           make(chan *ProducerMessage),
+		successes:       make(chan *ProducerMessage),
+		retries:         make(chan *ProducerMessage),
+		brokers:         make(map[*Broker]*brokerProducer),
+		brokerRefs:      make(map[*brokerProducer]int),
+		txnmgr:          txnmgr,
+		metricsRegistry: newCleanupRegistry(client.Config().MetricRegistry),
+	}
+
+	// launch our singleton dispatchers
+	go withRecover(p.dispatcher)
+	go withRecover(p.retryHandler)
+
+	return p, nil
+}
+
+type flagSet int8
+
+const (
+	syn       flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+	fin                           // final message from partitionProducer to brokerProducer and back
+	shutdown                      // start the shutdown process
+	endtxn                        // end the current transaction
+	committxn                     // commit the current transaction
+	aborttxn                      // abort the current transaction
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+	Topic string // The Kafka topic for this message.
+	// The partitioning key for this message. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Key Encoder
+	// The actual message to store in Kafka. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Value Encoder
+
+	// The headers are key-value pairs that are transparently passed
+	// by Kafka between producers and consumers.
+	Headers []RecordHeader
+
+	// This field is used to hold arbitrary data you wish to include so it
+	// will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only to be used for
+	// pass-through data.
+	Metadata interface{}
+
+	// Below this point are filled in by the producer as the message is processed
+
+	// Offset is the offset of the message stored on the broker. This is only
+	// guaranteed to be defined if the message was successfully delivered and
+	// RequiredAcks is not NoResponse.
+	Offset int64
+	// Partition is the partition that the message was sent to. This is only
+	// guaranteed to be defined if the message was successfully delivered.
+	Partition int32
+	// Timestamp can vary in behavior depending on broker configuration, being
+	// in either one of the CreateTime or LogAppendTime modes (default CreateTime),
+	// and requiring version at least 0.10.0.
+	//
+	// When configured to CreateTime, the timestamp is specified by the producer
+	// either by explicitly setting this field, or when the message is added
+	// to a produce set.
+	//
+	// When configured to LogAppendTime, the timestamp is assigned to the message
+	// by the broker. This is only guaranteed to be defined if the message was
+	// successfully delivered and RequiredAcks is not NoResponse.
+	Timestamp time.Time
+
+	retries        int
+	flags          flagSet
+	expectation    chan *ProducerError
+	sequenceNumber int32
+	producerEpoch  int16
+	hasSequence    bool
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) ByteSize(version int) int {
+	var size int
+	if version >= 2 {
+		size = maximumRecordOverhead
+		for _, h := range m.Headers {
+			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+		}
+	} else {
+		size = producerMessageOverhead
+	}
+	if m.Key != nil {
+		size += m.Key.Length()
+	}
+	if m.Value != nil {
+		size += m.Value.Length()
+	}
+	return size
+}
+
+func (m *ProducerMessage) clear() {
+	m.flags = 0
+	m.retries = 0
+	m.sequenceNumber = 0
+	m.producerEpoch = 0
+	m.hasSequence = false
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+	Msg *ProducerMessage
+	Err error
+}
+
+func (pe ProducerError) Error() string {
+	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+func (pe ProducerError) Unwrap() error {
+	return pe.Err
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
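As a hedged sketch (not part of the vendored file), the batched ProducerErrors value described above can be unwrapped from the error returned by Close; the broker address below is a placeholder for illustration.

package main

import (
	"errors"
	"log"

	"github.com/IBM/sarama"
)

func main() {
	// "localhost:9092" is a placeholder broker address for this sketch.
	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}

	// ... hand producer.Input() some messages here ...

	if err := producer.Close(); err != nil {
		var pErrs sarama.ProducerErrors
		if errors.As(err, &pErrs) {
			// Close batches every undelivered message into one error value.
			for _, pe := range pErrs {
				log.Printf("failed to deliver to %s: %v", pe.Msg.Topic, pe.Err)
			}
			return
		}
		log.Printf("close failed: %v", err)
	}
}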
+
+func (p *asyncProducer) IsTransactional() bool {
+	return p.txnmgr.isTransactional()
+}
+
+func (p *asyncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error {
+	offsets := make(map[string][]*PartitionOffsetMetadata)
+	offsets[msg.Topic] = []*PartitionOffsetMetadata{
+		{
+			Partition: msg.Partition,
+			Offset:    msg.Offset + 1,
+			Metadata:  metadata,
+		},
+	}
+	return p.AddOffsetsToTxn(offsets, groupId)
+}
+
+func (p *asyncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error {
+	p.txLock.Lock()
+	defer p.txLock.Unlock()
+
+	if !p.IsTransactional() {
+		DebugLogger.Printf("producer/txnmgr [%s] attempt to call AddOffsetsToTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+		return ErrNonTransactedProducer
+	}
+
+	DebugLogger.Printf("producer/txnmgr [%s] add offsets to transaction\n", p.txnmgr.transactionalID)
+	return p.txnmgr.addOffsetsToTxn(offsets, groupId)
+}
+
+func (p *asyncProducer) TxnStatus() ProducerTxnStatusFlag {
+	return p.txnmgr.currentTxnStatus()
+}
+
+func (p *asyncProducer) BeginTxn() error {
+	p.txLock.Lock()
+	defer p.txLock.Unlock()
+
+	if !p.IsTransactional() {
+		DebugLogger.Println("producer/txnmgr attempt to call BeginTxn on a non-transactional producer")
+		return ErrNonTransactedProducer
+	}
+
+	return p.txnmgr.transitionTo(ProducerTxnFlagInTransaction, nil)
+}
+
+func (p *asyncProducer) CommitTxn() error {
+	p.txLock.Lock()
+	defer p.txLock.Unlock()
+
+	if !p.IsTransactional() {
+		DebugLogger.Printf("producer/txnmgr [%s] attempt to call CommitTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+		return ErrNonTransactedProducer
+	}
+
+	DebugLogger.Printf("producer/txnmgr [%s] committing transaction\n", p.txnmgr.transactionalID)
+	err := p.finishTransaction(true)
+	if err != nil {
+		return err
+	}
+	DebugLogger.Printf("producer/txnmgr [%s] transaction committed\n", p.txnmgr.transactionalID)
+	return nil
+}
+
+func (p *asyncProducer) AbortTxn() error {
+	p.txLock.Lock()
+	defer p.txLock.Unlock()
+
+	if !p.IsTransactional() {
+		DebugLogger.Printf("producer/txnmgr [%s] attempt to call AbortTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+		return ErrNonTransactedProducer
+	}
+	DebugLogger.Printf("producer/txnmgr [%s] aborting transaction\n", p.txnmgr.transactionalID)
+	err := p.finishTransaction(false)
+	if err != nil {
+		return err
+	}
+	DebugLogger.Printf("producer/txnmgr [%s] transaction aborted\n", p.txnmgr.transactionalID)
+	return nil
+}
+
+func (p *asyncProducer) finishTransaction(commit bool) error {
+	p.inFlight.Add(1)
+	if commit {
+		p.input <- &ProducerMessage{flags: endtxn | committxn}
+	} else {
+		p.input <- &ProducerMessage{flags: endtxn | aborttxn}
+	}
+	p.inFlight.Wait()
+	return p.txnmgr.finishTransaction(commit)
+}
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+	return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+	return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+	return p.input
+}
+
+func (p *asyncProducer) Close() error {
+	p.AsyncClose()
+
+	if p.conf.Producer.Return.Successes {
+		go withRecover(func() {
+			for range p.successes {
+			}
+		})
+	}
+
+	var pErrs ProducerErrors
+	if p.conf.Producer.Return.Errors {
+		for event := range p.errors {
+			pErrs = append(pErrs, event)
+		}
+	} else {
+		<-p.errors
+	}
+
+	if len(pErrs) > 0 {
+		return pErrs
+	}
+	return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+	go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+	handlers := make(map[string]chan<- *ProducerMessage)
+	shuttingDown := false
+
+	for msg := range p.input {
+		if msg == nil {
+			Logger.Println("Something tried to send a nil message, it was ignored.")
+			continue
+		}
+
+		if msg.flags&endtxn != 0 {
+			var err error
+			if msg.flags&committxn != 0 {
+				err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagCommittingTransaction, nil)
+			} else {
+				err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagAbortingTransaction, nil)
+			}
+			if err != nil {
+				Logger.Printf("producer/txnmgr unable to end transaction %s", err)
+			}
+			p.inFlight.Done()
+			continue
+		}
+
+		if msg.flags&shutdown != 0 {
+			shuttingDown = true
+			p.inFlight.Done()
+			continue
+		}
+
+		if msg.retries == 0 {
+			if shuttingDown {
+				// we can't just call returnError here because that decrements the wait group,
+				// which hasn't been incremented yet for this message, and shouldn't be
+				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+				if p.conf.Producer.Return.Errors {
+					p.errors <- pErr
+				} else {
+					Logger.Println(pErr)
+				}
+				continue
+			}
+			p.inFlight.Add(1)
+			// Ignore retried msgs, they are already in the txn.
+			// We can't produce new records when the transaction is not started.
+			if p.IsTransactional() && p.txnmgr.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 {
+				Logger.Printf("attempt to send message when transaction is not started or is in ending state, got %d, expect %d\n", p.txnmgr.currentTxnStatus(), ProducerTxnFlagInTransaction)
+				p.returnError(msg, ErrTransactionNotReady)
+				continue
+			}
+		}
+
+		for _, interceptor := range p.conf.Producer.Interceptors {
+			msg.safelyApplyInterceptor(interceptor)
+		}
+
+		version := 1
+		if p.conf.Version.IsAtLeast(V0_11_0_0) {
+			version = 2
+		} else if msg.Headers != nil {
+			p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
+			continue
+		}
+
+		size := msg.ByteSize(version)
+		if size > p.conf.Producer.MaxMessageBytes {
+			p.returnError(msg, ConfigurationError(fmt.Sprintf("Attempt to produce message larger than configured Producer.MaxMessageBytes: %d > %d", size, p.conf.Producer.MaxMessageBytes)))
+			continue
+		}
+
+		handler := handlers[msg.Topic]
+		if handler == nil {
+			handler = p.newTopicProducer(msg.Topic)
+			handlers[msg.Topic] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range handlers {
+		close(handler)
+	}
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+	parent *asyncProducer
+	topic  string
+	input  <-chan *ProducerMessage
+
+	breaker     *breaker.Breaker
+	handlers    map[int32]chan<- *ProducerMessage
+	partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	tp := &topicProducer{
+		parent:      p,
+		topic:       topic,
+		input:       input,
+		breaker:     breaker.New(3, 1, 10*time.Second),
+		handlers:    make(map[int32]chan<- *ProducerMessage),
+		partitioner: p.conf.Producer.Partitioner(topic),
+	}
+	go withRecover(tp.dispatch)
+	return input
+}
+
+func (tp *topicProducer) dispatch() {
+	for msg := range tp.input {
+		if msg.retries == 0 {
+			if err := tp.partitionMessage(msg); err != nil {
+				tp.parent.returnError(msg, err)
+				continue
+			}
+		}
+
+		handler := tp.handlers[msg.Partition]
+		if handler == nil {
+			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+			tp.handlers[msg.Partition] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range tp.handlers {
+		close(handler)
+	}
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+	var partitions []int32
+
+	err := tp.breaker.Run(func() (err error) {
+		requiresConsistency := false
+		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
+			requiresConsistency = ep.MessageRequiresConsistency(msg)
+		} else {
+			requiresConsistency = tp.partitioner.RequiresConsistency()
+		}
+
+		if requiresConsistency {
+			partitions, err = tp.parent.client.Partitions(msg.Topic)
+		} else {
+			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+		}
+		return
+	})
+	if err != nil {
+		return err
+	}
+
+	numPartitions := int32(len(partitions))
+
+	if numPartitions == 0 {
+		return ErrLeaderNotAvailable
+	}
+
+	choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+	if err != nil {
+		return err
+	} else if choice < 0 || choice >= numPartitions {
+		return ErrInvalidPartition
+	}
+
+	msg.Partition = partitions[choice]
+
+	return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+	parent    *asyncProducer
+	topic     string
+	partition int32
+	input     <-chan *ProducerMessage
+
+	leader         *Broker
+	breaker        *breaker.Breaker
+	brokerProducer *brokerProducer
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
+	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+	// therefore whether our buffer is complete and safe to flush)
+	highWatermark int
+	retryState    []partitionRetryState
+}
+
+type partitionRetryState struct {
+	buf          []*ProducerMessage
+	expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	pp := &partitionProducer{
+		parent:    p,
+		topic:     topic,
+		partition: partition,
+		input:     input,
+
+		breaker:    breaker.New(3, 1, 10*time.Second),
+		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+	}
+	go withRecover(pp.dispatch)
+	return input
+}
+
+func (pp *partitionProducer) backoff(retries int) {
+	var backoff time.Duration
+	if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
+		maxRetries := pp.parent.conf.Producer.Retry.Max
+		backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
+	} else {
+		backoff = pp.parent.conf.Producer.Retry.Backoff
+	}
+	if backoff > 0 {
+		time.Sleep(backoff)
+	}
+}
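The backoff helper above prefers Producer.Retry.BackoffFunc when one is configured and otherwise falls back to the fixed Producer.Retry.Backoff. A hedged configuration sketch (not part of the vendored file) of an exponential backoff capped at two seconds; the base delay and cap are arbitrary illustration values.

package main

import (
	"time"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Retry.Max = 5
	// Exponential backoff: 100ms, 200ms, 400ms, ... capped at 2s.
	cfg.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		backoff := 100 * time.Millisecond << uint(retries)
		if backoff > 2*time.Second {
			backoff = 2 * time.Second
		}
		return backoff
	}
	// cfg would then be passed to NewAsyncProducer.
}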
+
+func (pp *partitionProducer) updateLeaderIfBrokerProducerIsNil(msg *ProducerMessage) error {
+	if pp.brokerProducer == nil {
+		if err := pp.updateLeader(); err != nil {
+			pp.parent.returnError(msg, err)
+			pp.backoff(msg.retries)
+			return err
+		}
+		Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+	}
+	return nil
+}
+
+func (pp *partitionProducer) dispatch() {
+	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+	// on the first message
+	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+	if pp.leader != nil {
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+	}
+
+	defer func() {
+		if pp.brokerProducer != nil {
+			pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+		}
+	}()
+
+	for msg := range pp.input {
+		if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
+			select {
+			case <-pp.brokerProducer.abandoned:
+				// a message on the abandoned channel means that our current broker selection is out of date
+				Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+				pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+				pp.brokerProducer = nil
+				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+			default:
+				// producer connection is still open.
+			}
+		}
+
+		if msg.retries > pp.highWatermark {
+			if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil {
+				continue
+			}
+			// a new, higher, retry level; handle it and then back off
+			pp.newHighWatermark(msg.retries)
+			pp.backoff(msg.retries)
+		} else if pp.highWatermark > 0 {
+			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+			if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+				if msg.flags&fin == fin {
+					pp.retryState[msg.retries].expectChaser = false
+					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				} else {
+					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+				}
+				continue
+			} else if msg.flags&fin == fin {
+				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+				// meaning this retry level is done and we can go down (at least) one level and flush that
+				pp.retryState[pp.highWatermark].expectChaser = false
+				pp.flushRetryBuffers()
+				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				continue
+			}
+		}
+
+		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+		// without breaking any of our ordering guarantees
+		if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil {
+			continue
+		}
+
+		// Now that we know we have a broker to actually try and send this message to, generate the sequence
+		// number for it.
+		// All messages being retried (sent or not) have already had their retry count updated
+		// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
+		if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
+			msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+			msg.hasSequence = true
+		}
+
+		if pp.parent.IsTransactional() {
+			pp.parent.txnmgr.maybeAddPartitionToCurrentTxn(pp.topic, pp.partition)
+		}
+
+		pp.brokerProducer.input <- msg
+	}
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+	pp.highWatermark = hwm
+
+	// send off a fin so that we know when everything "in between" has made it
+	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+	pp.retryState[pp.highWatermark].expectChaser = true
+	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+	pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+	// a new HWM means that our current broker selection is out of date
+	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+	pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+	pp.brokerProducer = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+	for {
+		pp.highWatermark--
+
+		if pp.brokerProducer == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+				goto flushDone
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		for _, msg := range pp.retryState[pp.highWatermark].buf {
+			pp.brokerProducer.input <- msg
+		}
+
+	flushDone:
+		pp.retryState[pp.highWatermark].buf = nil
+		if pp.retryState[pp.highWatermark].expectChaser {
+			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+			break
+		} else if pp.highWatermark == 0 {
+			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+			break
+		}
+	}
+}
+
+func (pp *partitionProducer) updateLeader() error {
+	return pp.breaker.Run(func() (err error) {
+		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+			return err
+		}
+
+		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+			return err
+		}
+
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+		return nil
+	})
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
+	var (
+		input     = make(chan *ProducerMessage)
+		bridge    = make(chan *produceSet)
+		pending   = make(chan *brokerProducerResponse)
+		responses = make(chan *brokerProducerResponse)
+	)
+
+	bp := &brokerProducer{
+		parent:         p,
+		broker:         broker,
+		input:          input,
+		output:         bridge,
+		responses:      responses,
+		buffer:         newProduceSet(p),
+		currentRetries: make(map[string]map[int32]error),
+	}
+	go withRecover(bp.run)
+
+	// minimal bridge to make the network response `select`able
+	go withRecover(func() {
+		// Use a wait group to know if we still have in flight requests
+		var wg sync.WaitGroup
+
+		for set := range bridge {
+			request := set.buildRequest()
+
+			// Count the in flight requests to know when we can close the pending channel safely
+			wg.Add(1)
+			// Capture the current set to forward in the callback
+			sendResponse := func(set *produceSet) ProduceCallback {
+				return func(response *ProduceResponse, err error) {
+					// Forward the response to make sure we do not block the responseReceiver
+					pending <- &brokerProducerResponse{
+						set: set,
+						err: err,
+						res: response,
+					}
+					wg.Done()
+				}
+			}(set)
+
+			if p.IsTransactional() {
+				// Add partition to tx before sending current batch
+				err := p.txnmgr.publishTxnPartitions()
+				if err != nil {
+					// Request failed to be sent
+					sendResponse(nil, err)
+					continue
+				}
+			}
+
+			// Use AsyncProduce vs Produce to not block waiting for the response
+			// so that we can pipeline multiple produce requests and achieve higher throughput, see:
+			// https://kafka.apache.org/protocol#protocol_network
+			err := broker.AsyncProduce(request, sendResponse)
+			if err != nil {
+				// Request failed to be sent
+				sendResponse(nil, err)
+				continue
+			}
+			// Callback is not called when using NoResponse
+			if p.conf.Producer.RequiredAcks == NoResponse {
+				// Provide the expected nil response
+				sendResponse(nil, nil)
+			}
+		}
+		// Wait for all in flight requests to close the pending channel safely
+		wg.Wait()
+		close(pending)
+	})
+
+	// In order to avoid a deadlock when closing the broker on a network or malformed response error,
+	// we use an intermediate channel to buffer and send pending responses in order.
+	// This is because the AsyncProduce callback inside the bridge is invoked from the broker's
+	// responseReceiver goroutine, and closing the broker requires that goroutine to be finished.
+	go withRecover(func() {
+		buf := queue.New()
+		for {
+			if buf.Length() == 0 {
+				res, ok := <-pending
+				if !ok {
+					// We are done forwarding the last pending response
+					close(responses)
+					return
+				}
+				buf.Add(res)
+			}
+			// Send the head pending response or buffer another one
+			// so that we never block the callback
+			headRes := buf.Peek().(*brokerProducerResponse)
+			select {
+			case res, ok := <-pending:
+				if !ok {
+					continue
+				}
+				buf.Add(res)
+				continue
+			case responses <- headRes:
+				buf.Remove()
+				continue
+			}
+		}
+	})
+
+	if p.conf.Producer.Retry.Max <= 0 {
+		bp.abandoned = make(chan struct{})
+	}
+
+	return bp
+}
+
+type brokerProducerResponse struct {
+	set *produceSet
+	err error
+	res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+	parent *asyncProducer
+	broker *Broker
+
+	input     chan *ProducerMessage
+	output    chan<- *produceSet
+	responses <-chan *brokerProducerResponse
+	abandoned chan struct{}
+
+	buffer     *produceSet
+	timer      *time.Timer
+	timerFired bool
+
+	closing        error
+	currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+	var output chan<- *produceSet
+	var timerChan <-chan time.Time
+	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+	for {
+		select {
+		case msg, ok := <-bp.input:
+			if !ok {
+				Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
+				bp.shutdown()
+				return
+			}
+
+			if msg == nil {
+				continue
+			}
+
+			if msg.flags&syn == syn {
+				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+					bp.broker.ID(), msg.Topic, msg.Partition)
+				if bp.currentRetries[msg.Topic] == nil {
+					bp.currentRetries[msg.Topic] = make(map[int32]error)
+				}
+				bp.currentRetries[msg.Topic][msg.Partition] = nil
+				bp.parent.inFlight.Done()
+				continue
+			}
+
+			if reason := bp.needsRetry(msg); reason != nil {
+				bp.parent.retryMessage(msg, reason)
+
+				if bp.closing == nil && msg.flags&fin == fin {
+					// we were retrying this partition but we can start processing again
+					delete(bp.currentRetries[msg.Topic], msg.Partition)
+					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+						bp.broker.ID(), msg.Topic, msg.Partition)
+				}
+
+				continue
+			}
+
+			if msg.flags&fin == fin {
+				// New broker producer that was caught up by the retry loop
+				bp.parent.retryMessage(msg, ErrShuttingDown)
+				DebugLogger.Printf("producer/broker/%d state change to [dying-%d] on %s/%d\n",
+					bp.broker.ID(), msg.retries, msg.Topic, msg.Partition)
+				continue
+			}
+
+			if bp.buffer.wouldOverflow(msg) {
+				Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, false); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+
+			if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
+				// The epoch was reset, need to roll the buffer over
+				Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, true); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+			if err := bp.buffer.add(msg); err != nil {
+				bp.parent.returnError(msg, err)
+				continue
+			}
+
+			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+				bp.timer = time.NewTimer(bp.parent.conf.Producer.Flush.Frequency)
+				timerChan = bp.timer.C
+			}
+		case <-timerChan:
+			bp.timerFired = true
+		case output <- bp.buffer:
+			bp.rollOver()
+			timerChan = nil
+		case response, ok := <-bp.responses:
+			if ok {
+				bp.handleResponse(response)
+			}
+		}
+
+		if bp.timerFired || bp.buffer.readyToFlush() {
+			output = bp.output
+		} else {
+			output = nil
+		}
+	}
+}
+
+func (bp *brokerProducer) shutdown() {
+	for !bp.buffer.empty() {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+		}
+	}
+	close(bp.output)
+	// Drain responses from the bridge goroutine
+	for response := range bp.responses {
+		bp.handleResponse(response)
+	}
+	// No more brokerProducer related goroutine should be running
+	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+	if bp.closing != nil {
+		return bp.closing
+	}
+
+	return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
+	for {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+			// handling a response can change our state, so re-check some things
+			if reason := bp.needsRetry(msg); reason != nil {
+				return reason
+			} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
+				return nil
+			}
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+			return nil
+		}
+	}
+}
+
+func (bp *brokerProducer) rollOver() {
+	if bp.timer != nil {
+		bp.timer.Stop()
+	}
+	bp.timer = nil
+	bp.timerFired = false
+	bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+	if response.err != nil {
+		bp.handleError(response.set, response.err)
+	} else {
+		bp.handleSuccess(response.set, response.res)
+	}
+
+	if bp.buffer.empty() {
+		bp.rollOver() // this can happen if the response invalidated our buffer
+	}
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+	// we iterate through the blocks in the request set, not the response, so that we notice
+	// if the response is missing a block completely
+	var retryTopics []string
+	sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+		if response == nil {
+			// this only happens when RequiredAcks is NoResponse, so we have to assume success
+			bp.parent.returnSuccesses(pSet.msgs)
+			return
+		}
+
+		block := response.GetBlock(topic, partition)
+		if block == nil {
+			bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
+			return
+		}
+
+		switch block.Err {
+		// Success
+		case ErrNoError:
+			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+				for _, msg := range pSet.msgs {
+					msg.Timestamp = block.Timestamp
+				}
+			}
+			for i, msg := range pSet.msgs {
+				msg.Offset = block.Offset + int64(i)
+			}
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Duplicate
+		case ErrDuplicateSequenceNumber:
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Retriable errors
+		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
+			if bp.parent.conf.Producer.Retry.Max <= 0 {
+				bp.parent.abandonBrokerConnection(bp.broker)
+				bp.parent.returnErrors(pSet.msgs, block.Err)
+			} else {
+				retryTopics = append(retryTopics, topic)
+			}
+		// Other non-retriable errors
+		default:
+			if bp.parent.conf.Producer.Retry.Max <= 0 {
+				bp.parent.abandonBrokerConnection(bp.broker)
+			}
+			bp.parent.returnErrors(pSet.msgs, block.Err)
+		}
+	})
+
+	if len(retryTopics) > 0 {
+		if bp.parent.conf.Producer.Idempotent {
+			err := bp.parent.client.RefreshMetadata(retryTopics...)
+			if err != nil {
+				Logger.Printf("Failed refreshing metadata because of %v\n", err)
+			}
+		}
+
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			block := response.GetBlock(topic, partition)
+			if block == nil {
+				// handled in the previous "eachPartition" loop
+				return
+			}
+
+			switch block.Err {
+			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
+				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+					bp.broker.ID(), topic, partition, block.Err)
+				if bp.currentRetries[topic] == nil {
+					bp.currentRetries[topic] = make(map[int32]error)
+				}
+				bp.currentRetries[topic][partition] = block.Err
+				if bp.parent.conf.Producer.Idempotent {
+					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
+				} else {
+					bp.parent.retryMessages(pSet.msgs, block.Err)
+				}
+				// dropping the following messages has the side effect of incrementing their retry count
+				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+			}
+		})
+	}
+}
+
+func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
+	Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
+	produceSet := newProduceSet(p)
+	produceSet.msgs[topic] = make(map[int32]*partitionSet)
+	produceSet.msgs[topic][partition] = pSet
+	produceSet.bufferBytes += pSet.bufferBytes
+	produceSet.bufferCount += len(pSet.msgs)
+	for _, msg := range pSet.msgs {
+		if msg.retries >= p.conf.Producer.Retry.Max {
+			p.returnErrors(pSet.msgs, kerr)
+			return
+		}
+		msg.retries++
+	}
+
+	// it's expected that a metadata refresh has been requested prior to calling retryBatch
+	leader, err := p.client.Leader(topic, partition)
+	if err != nil {
+		Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
+		for _, msg := range pSet.msgs {
+			p.returnError(msg, kerr)
+		}
+		return
+	}
+	bp := p.getBrokerProducer(leader)
+	bp.output <- produceSet
+	p.unrefBrokerProducer(leader, bp)
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+	var target PacketEncodingError
+	if errors.As(err, &target) {
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.returnErrors(pSet.msgs, err)
+		})
+	} else {
+		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+		bp.parent.abandonBrokerConnection(bp.broker)
+		_ = bp.broker.Close()
+		bp.closing = err
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.rollOver()
+	}
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+	maxBufferLength := p.conf.Producer.Retry.MaxBufferLength
+	if 0 < maxBufferLength && maxBufferLength < minFunctionalRetryBufferLength {
+		maxBufferLength = minFunctionalRetryBufferLength
+	}
+
+	maxBufferBytes := p.conf.Producer.Retry.MaxBufferBytes
+	if 0 < maxBufferBytes && maxBufferBytes < minFunctionalRetryBufferBytes {
+		maxBufferBytes = minFunctionalRetryBufferBytes
+	}
+
+	version := 1
+	if p.conf.Version.IsAtLeast(V0_11_0_0) {
+		version = 2
+	}
+
+	var currentByteSize int64
+	var msg *ProducerMessage
+	buf := queue.New()
+
+	for {
+		if buf.Length() == 0 {
+			msg = <-p.retries
+		} else {
+			select {
+			case msg = <-p.retries:
+			case p.input <- buf.Peek().(*ProducerMessage):
+				msgToRemove := buf.Remove().(*ProducerMessage)
+				currentByteSize -= int64(msgToRemove.ByteSize(version))
+				continue
+			}
+		}
+
+		if msg == nil {
+			return
+		}
+
+		buf.Add(msg)
+		currentByteSize += int64(msg.ByteSize(version))
+
+		if (maxBufferLength <= 0 || buf.Length() < maxBufferLength) && (maxBufferBytes <= 0 || currentByteSize < maxBufferBytes) {
+			continue
+		}
+
+		msgToHandle := buf.Peek().(*ProducerMessage)
+		if msgToHandle.flags == 0 {
+			select {
+			case p.input <- msgToHandle:
+				buf.Remove()
+				currentByteSize -= int64(msgToHandle.ByteSize(version))
+			default:
+				buf.Remove()
+				currentByteSize -= int64(msgToHandle.ByteSize(version))
+				p.returnError(msgToHandle, ErrProducerRetryBufferOverflow)
+			}
+		}
+	}
+}
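retryHandler above bounds its bridging buffer using Producer.Retry.MaxBufferLength and Producer.Retry.MaxBufferBytes, raising sub-minimum values to the functional floors defined earlier in this file. A hedged configuration sketch (not part of the vendored file); the chosen limits are illustrative only.

package main

import "github.com/IBM/sarama"

func main() {
	cfg := sarama.NewConfig()
	// Bound the retry buffer; values below the functional minimums above
	// (4096 messages / 32 MB) would be raised to those floors.
	cfg.Producer.Retry.MaxBufferLength = 8192            // at most 8192 queued retries
	cfg.Producer.Retry.MaxBufferBytes = 64 * 1024 * 1024 // or 64 MB of queued retries
	// cfg would then be passed to NewAsyncProducer.
}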
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+	Logger.Println("Producer shutting down.")
+	p.inFlight.Add(1)
+	p.input <- &ProducerMessage{flags: shutdown}
+
+	p.inFlight.Wait()
+
+	err := p.client.Close()
+	if err != nil {
+		Logger.Println("producer/shutdown failed to close the embedded client:", err)
+	}
+
+	close(p.input)
+	close(p.retries)
+	close(p.errors)
+	close(p.successes)
+
+	p.metricsRegistry.UnregisterAll()
+}
+
+func (p *asyncProducer) bumpIdempotentProducerEpoch() {
+	_, epoch := p.txnmgr.getProducerID()
+	if epoch == math.MaxInt16 {
+		Logger.Println("producer/txnmanager epoch exhausted, requesting new producer ID")
+		txnmgr, err := newTransactionManager(p.conf, p.client)
+		if err != nil {
+			Logger.Println(err)
+			return
+		}
+
+		p.txnmgr = txnmgr
+	} else {
+		p.txnmgr.bumpEpoch()
+	}
+}
+
+func (p *asyncProducer) maybeTransitionToErrorState(err error) error {
+	if errors.Is(err, ErrClusterAuthorizationFailed) ||
+		errors.Is(err, ErrProducerFenced) ||
+		errors.Is(err, ErrUnsupportedVersion) ||
+		errors.Is(err, ErrTransactionalIDAuthorizationFailed) {
+		return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err)
+	}
+	if p.txnmgr.coordinatorSupportsBumpingEpoch && p.txnmgr.currentTxnStatus()&ProducerTxnFlagEndTransaction == 0 {
+		p.txnmgr.epochBumpRequired = true
+	}
+	return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	if p.IsTransactional() {
+		_ = p.maybeTransitionToErrorState(err)
+	}
+	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
+	// will never see a message with this number, so we can never continue the sequence.
+	if !p.IsTransactional() && msg.hasSequence {
+		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
+		p.bumpIdempotentProducerEpoch()
+	}
+
+	msg.clear()
+	pErr := &ProducerError{Msg: msg, Err: err}
+	if p.conf.Producer.Return.Errors {
+		p.errors <- pErr
+	} else {
+		Logger.Println(pErr)
+	}
+	p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.returnError(msg, err)
+	}
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+	for _, msg := range batch {
+		if p.conf.Producer.Return.Successes {
+			msg.clear()
+			p.successes <- msg
+		}
+		p.inFlight.Done()
+	}
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+	if msg.retries >= p.conf.Producer.Retry.Max {
+		p.returnError(msg, err)
+	} else {
+		msg.retries++
+		p.retries <- msg
+	}
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.retryMessage(msg, err)
+	}
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bp := p.brokers[broker]
+
+	if bp == nil {
+		bp = p.newBrokerProducer(broker)
+		p.brokers[broker] = bp
+		p.brokerRefs[bp] = 0
+	}
+
+	p.brokerRefs[bp]++
+
+	return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	p.brokerRefs[bp]--
+	if p.brokerRefs[bp] == 0 {
+		close(bp.input)
+		delete(p.brokerRefs, bp)
+
+		if p.brokers[broker] == bp {
+			delete(p.brokers, broker)
+		}
+	}
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bc, ok := p.brokers[broker]
+	if ok && bc.abandoned != nil {
+		close(bc.abandoned)
+	}
+
+	delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/IBM/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go
new file mode 100644
index 0000000..59f8948
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/balance_strategy.go
@@ -0,0 +1,1127 @@
+package sarama
+
+import (
+	"container/heap"
+	"errors"
+	"fmt"
+	"maps"
+	"math"
+	"slices"
+	"sort"
+	"strings"
+)
+
+const (
+	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+	RangeBalanceStrategyName = "range"
+
+	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+	RoundRobinBalanceStrategyName = "roundrobin"
+
+	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+	StickyBalanceStrategyName = "sticky"
+
+	defaultGeneration = -1
+)
+
+// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
+// It contains an allocation of topic/partitions by memberID in the form of
+// a `memberID -> topic -> partitions` map.
+type BalanceStrategyPlan map[string]map[string][]int32
+
+// Add assigns a topic with a number of partitions to a member.
+func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
+	if len(partitions) == 0 {
+		return
+	}
+	if _, ok := p[memberID]; !ok {
+		p[memberID] = make(map[string][]int32, 1)
+	}
+	p[memberID][topic] = append(p[memberID][topic], partitions...)
+}
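A small illustration (not part of the vendored file) of how Add builds the memberID -> topic -> partitions map that Plan implementations return; the member and topic names are placeholders.

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	plan := make(sarama.BalanceStrategyPlan)
	plan.Add("member-1", "T1", 0, 1, 2)
	plan.Add("member-2", "T1", 3, 4, 5)
	fmt.Println(plan) // map[member-1:map[T1:[0 1 2]] member-2:map[T1:[3 4 5]]]
}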
+
+// --------------------------------------------------------------------
+
+// BalanceStrategy is used to balance topics and partitions
+// across members of a consumer group
+type BalanceStrategy interface {
+	// Name uniquely identifies the strategy.
+	Name() string
+
+	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
+	// and returns a distribution plan.
+	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+
+	// AssignmentData returns the serialized assignment data for the specified
+	// memberID
+	AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
+}
+
+// --------------------------------------------------------------------
+
+// NewBalanceStrategyRange returns a range balance strategy,
+// which is the default and assigns partitions as ranges to consumer group members.
+// This follows the same logic as
+// https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html
+//
+// Example with two topics T1 and T2 with six partitions each (0..5) and two members (M1, M2):
+//
+//	M1: {T1: [0, 1, 2], T2: [0, 1, 2]}
+//	M2: {T1: [3, 4, 5], T2: [3, 4, 5]}
+func NewBalanceStrategyRange() BalanceStrategy {
+	return &balanceStrategy{
+		name: RangeBalanceStrategyName,
+		coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+			partitionsPerConsumer := len(partitions) / len(memberIDs)
+			consumersWithExtraPartition := len(partitions) % len(memberIDs)
+
+			sort.Strings(memberIDs)
+
+			for i, memberID := range memberIDs {
+				min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i)))
+				extra := 0
+				if i < consumersWithExtraPartition {
+					extra = 1
+				}
+				max := min + partitionsPerConsumer + extra
+				plan.Add(memberID, topic, partitions[min:max]...)
+			}
+		},
+	}
+}
+
+// Deprecated: use NewBalanceStrategyRange to avoid data race issue
+var BalanceStrategyRange = NewBalanceStrategyRange()
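A hedged sketch (not part of the vendored file) that reproduces the two-member range-assignment example from the comment above using the exported strategy; the member names, topic names, and partition counts mirror that example.

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	strategy := sarama.NewBalanceStrategyRange()
	members := map[string]sarama.ConsumerGroupMemberMetadata{
		"M1": {Topics: []string{"T1", "T2"}},
		"M2": {Topics: []string{"T1", "T2"}},
	}
	topics := map[string][]int32{
		"T1": {0, 1, 2, 3, 4, 5},
		"T2": {0, 1, 2, 3, 4, 5},
	}
	plan, err := strategy.Plan(members, topics)
	if err != nil {
		panic(err)
	}
	fmt.Println(plan["M1"]) // map[T1:[0 1 2] T2:[0 1 2]]
	fmt.Println(plan["M2"]) // map[T1:[3 4 5] T2:[3 4 5]]
}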
+
+// NewBalanceStrategySticky returns a sticky balance strategy,
+// which assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+//
+//	M1: {T: [0, 2, 4]}
+//	M2: {T: [1, 3, 5]}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+//
+//	M1: {T: [0, 2]}
+//	M2: {T: [1, 3]}
+//	M3: {T: [4, 5]}
+func NewBalanceStrategySticky() BalanceStrategy {
+	return &stickyBalanceStrategy{}
+}
+
+// Deprecated: use NewBalanceStrategySticky to avoid data race issue
+var BalanceStrategySticky = NewBalanceStrategySticky()
+
+// --------------------------------------------------------------------
+
+type balanceStrategy struct {
+	coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
+	name   string
+}
+
+// Name implements BalanceStrategy.
+func (s *balanceStrategy) Name() string { return s.name }
+
+// Plan implements BalanceStrategy.
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// Build members by topic map
+	mbt := make(map[string][]string)
+	for memberID, meta := range members {
+		for _, topic := range meta.Topics {
+			mbt[topic] = append(mbt[topic], memberID)
+		}
+	}
+
+	// func to sort and de-duplicate a StringSlice
+	uniq := func(ss sort.StringSlice) []string {
+		if ss.Len() < 2 {
+			return ss
+		}
+		sort.Sort(ss)
+		var i, j int
+		for i = 1; i < ss.Len(); i++ {
+			if ss[i] == ss[j] {
+				continue
+			}
+			j++
+			ss.Swap(i, j)
+		}
+		return ss[:j+1]
+	}
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(members))
+	for topic, memberIDs := range mbt {
+		s.coreFn(plan, uniq(memberIDs), topic, topics[topic])
+	}
+	return plan, nil
+}
+
+// AssignmentData simple strategies do not require any shared assignment data
+func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil
+}
+
+type stickyBalanceStrategy struct {
+	movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// track partition movements during generation of the partition assignment plan
+	s.movements = partitionMovements{
+		Movements:                 make(map[topicPartitionAssignment]consumerPair),
+		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+	}
+
+	// prepopulate the current assignment state from userdata on the consumer group members
+	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+	if err != nil {
+		return nil, err
+	}
+
+	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+	isFreshAssignment := len(currentAssignment) == 0
+
+	// create a mapping of all current topic partitions and the consumers that can be assigned to them
+	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+		}
+	}
+
+	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
+	// also, populate the mapping of partitions to potential consumers
+	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+	for memberID, meta := range members {
+		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+		for _, topicSubscription := range meta.Topics {
+			// only evaluate topic subscriptions that are present in the supplied topics map
+			if _, found := topics[topicSubscription]; found {
+				for _, partition := range topics[topicSubscription] {
+					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+				}
+			}
+		}
+
+		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+		if _, exists := currentAssignment[memberID]; !exists {
+			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+		}
+	}
+
+	// create a mapping of each partition to its current consumer, where possible
+	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unvisitedPartitions[partition] = true
+	}
+	var unassignedPartitions []topicPartitionAssignment
+	for memberID, partitions := range currentAssignment {
+		var keepPartitions []topicPartitionAssignment
+		for _, partition := range partitions {
+			// If this partition no longer exists at all, likely due to the
+			// topic being deleted, we remove the partition from the member.
+			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+				continue
+			}
+			delete(unvisitedPartitions, partition)
+			currentPartitionConsumers[partition] = memberID
+
+			if !slices.Contains(members[memberID].Topics, partition.Topic) {
+				unassignedPartitions = append(unassignedPartitions, partition)
+				continue
+			}
+			keepPartitions = append(keepPartitions, partition)
+		}
+		currentAssignment[memberID] = keepPartitions
+	}
+	for unvisited := range unvisitedPartitions {
+		unassignedPartitions = append(unassignedPartitions, unvisited)
+	}
+
+	// sort the topic partitions in order of priority for reassignment
+	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+	// at this point we have preserved all valid topic partition to consumer assignments and removed
+	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+	// to consumers so that the topic partition assignments are as balanced as possible.
+
+	// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(currentAssignment))
+	for memberID, assignments := range currentAssignment {
+		if len(assignments) == 0 {
+			plan[memberID] = make(map[string][]int32)
+		} else {
+			for _, assignment := range assignments {
+				plan.Add(memberID, assignment.Topic, assignment.Partition)
+			}
+		}
+	}
+	return plan, nil
+}
+
+// AssignmentData serializes the set of topics currently assigned to the
+// specified member as part of the supplied balance plan
+func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return encode(&StickyAssignorUserDataV1{
+		Topics:     topics,
+		Generation: generationID,
+	}, nil)
+}
+
+// Balance assignments across consumers for maximum fairness and stickiness.
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
+	initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0
+
+	// assign all unassigned partitions
+	for _, partition := range unassignedPartitions {
+		// skip if there is no potential consumer for the partition
+		if len(partition2AllPotentialConsumers[partition]) == 0 {
+			continue
+		}
+		sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
+	}
+
+	// narrow down the reassignment scope to only those partitions that can actually be reassigned
+	for partition := range partition2AllPotentialConsumers {
+		if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
+		}
+	}
+
+	// narrow down the reassignment scope to only those consumers that are subject to reassignment
+	fixedAssignments := make(map[string][]topicPartitionAssignment)
+	for memberID := range consumer2AllPotentialPartitions {
+		if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
+			fixedAssignments[memberID] = currentAssignment[memberID]
+			delete(currentAssignment, memberID)
+			sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+		}
+	}
+
+	// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
+	preBalanceAssignment := deepCopyAssignment(currentAssignment)
+	preBalancePartitionConsumers := maps.Clone(currentPartitionConsumer)
+
+	reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
+
+	// if we are not preserving existing assignments and we have made changes to the current assignment
+	// make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
+	if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
+		currentAssignment = deepCopyAssignment(preBalanceAssignment)
+		clear(currentPartitionConsumer)
+		maps.Copy(currentPartitionConsumer, preBalancePartitionConsumers)
+	}
+
+	// add the fixed assignments (those that could not change) back
+	maps.Copy(currentAssignment, fixedAssignments)
+}
+
+// NewBalanceStrategyRoundRobin returns a round-robin balance strategy,
+// which assigns partitions to members in alternating order.
+// For example, given two topics (t0, t1) and two consumers (M0, M1), where each topic has three partitions (p0, p1, p2):
+// M0: [t0p0, t0p2, t1p1]
+// M1: [t0p1, t1p0, t1p2]
+func NewBalanceStrategyRoundRobin() BalanceStrategy {
+	return new(roundRobinBalancer)
+}
+
+// Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issue
+var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin()
+
+type roundRobinBalancer struct{}
+
+func (b *roundRobinBalancer) Name() string {
+	return RoundRobinBalanceStrategyName
+}
+
+func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	if len(memberAndMetadata) == 0 || len(topics) == 0 {
+		return nil, errors.New("members and topics are not provided")
+	}
+	// sort partitions
+	var topicPartitions []topicAndPartition
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition})
+		}
+	}
+	sort.SliceStable(topicPartitions, func(i, j int) bool {
+		pi := topicPartitions[i]
+		pj := topicPartitions[j]
+		return pi.comparedValue() < pj.comparedValue()
+	})
+
+	// sort members
+	var members []memberAndTopic
+	for memberID, meta := range memberAndMetadata {
+		m := memberAndTopic{
+			memberID: memberID,
+			topics:   make(map[string]struct{}),
+		}
+		for _, t := range meta.Topics {
+			m.topics[t] = struct{}{}
+		}
+		members = append(members, m)
+	}
+	sort.SliceStable(members, func(i, j int) bool {
+		mi := members[i]
+		mj := members[j]
+		return mi.memberID < mj.memberID
+	})
+
+	// assign partitions
+	plan := make(BalanceStrategyPlan, len(members))
+	i := 0
+	n := len(members)
+	for _, tp := range topicPartitions {
+		m := members[i%n]
+		for !m.hasTopic(tp.topic) {
+			i++
+			m = members[i%n]
+		}
+		plan.Add(m.memberID, tp.topic, tp.partition)
+		i++
+	}
+	return plan, nil
+}
+
+func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil // do nothing for now
+}
+
+type topicAndPartition struct {
+	topic     string
+	partition int32
+}
+
+func (tp *topicAndPartition) comparedValue() string {
+	return fmt.Sprintf("%s-%d", tp.topic, tp.partition)
+}
+
+type memberAndTopic struct {
+	topics   map[string]struct{}
+	memberID string
+}
+
+func (m *memberAndTopic) hasTopic(topic string) bool {
+	_, isExist := m.topics[topic]
+	return isExist
+}
+
+// Calculate the balance score of the given assignment, as the sum of the absolute differences in
+// assigned-partition counts across all consumer pairs.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// A lower balance score indicates a more balanced assignment.
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+	consumer2AssignmentSize := make(map[string]int, len(assignment))
+	for memberID, partitions := range assignment {
+		consumer2AssignmentSize[memberID] = len(partitions)
+	}
+
+	var score float64
+	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+		delete(consumer2AssignmentSize, memberID)
+		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+		}
+	}
+	return int(score)
+}
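+
+// Illustrative worked example for getBalanceScore: with three consumers holding
+// 3, 1 and 2 partitions respectively, the pairwise differences are
+// |3-1| + |3-2| + |1-2| = 4, so the score is 4; an assignment where every
+// consumer holds the same number of partitions scores 0.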
+
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+	if min >= max-1 {
+		// if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true
+		return true
+	}
+
+	// create a mapping from partitions to the consumer assigned to them
+	allPartitions := make(map[topicPartitionAssignment]string)
+	for memberID, partitions := range currentAssignment {
+		for _, partition := range partitions {
+			if _, exists := allPartitions[partition]; exists {
+				Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition)
+			}
+			allPartitions[partition] = memberID
+		}
+	}
+
+// for each consumer that does not have all the topic partitions it can get, make sure none of the topic partitions it
+// could have but did not get can be moved to it (because that would break the balance)
+	for _, memberID := range sortedCurrentSubscriptions {
+		consumerPartitions := currentAssignment[memberID]
+		consumerPartitionCount := len(consumerPartitions)
+
+		// skip if this consumer already has all the topic partitions it can get
+		if consumerPartitionCount == len(allSubscriptions[memberID]) {
+			continue
+		}
+
+		// otherwise make sure it cannot get any more
+		potentialTopicPartitions := allSubscriptions[memberID]
+		for _, partition := range potentialTopicPartitions {
+			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+				otherConsumer := allPartitions[partition]
+				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+				if consumerPartitionCount < otherConsumerPartitionCount {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// Reassign all topic partitions that need reassignment until balanced.
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+	reassignmentPerformed := false
+	modified := false
+
+	// repeat reassignment until no partition can be moved to improve the balance
+	for {
+		modified = false
+		// reassign all reassignable partitions (starting from the partition with the fewest potential consumers)
+		// until the full list is processed or a balance is achieved
+		for _, partition := range reassignablePartitions {
+			if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
+				break
+			}
+
+			// the partition must have at least two consumers
+			if len(partition2AllPotentialConsumers[partition]) <= 1 {
+				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
+			}
+
+			// the partition must have a consumer
+			consumer := currentPartitionConsumer[partition]
+			if consumer == "" {
+				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+			}
+
+			if _, exists := prevAssignment[partition]; exists {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+					reassignmentPerformed = true
+					modified = true
+					continue
+				}
+			}
+
+			// check if a better-suited consumer exists for the partition; if so, reassign it
+			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+					reassignmentPerformed = true
+					modified = true
+					break
+				}
+			}
+		}
+		if !modified {
+			return reassignmentPerformed
+		}
+	}
+}
+
+// Identify a new consumer for a topic partition and reassign it.
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
+	for _, anotherConsumer := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
+			return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
+		}
+	}
+	return sortedCurrentSubscriptions
+}
+
+// Reassign a specific partition to a new consumer
+func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
+	consumer := currentPartitionConsumer[partition]
+	// find the correct partition movement considering the stickiness requirement
+	partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
+	return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
+}
+
+// Track the movement of a topic partition after assignment
+func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	oldConsumer := currentPartitionConsumer[partition]
+	s.movements.movePartition(partition, oldConsumer, newConsumer)
+
+	currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
+	currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
+	currentPartitionConsumer[partition] = newConsumer
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Determine whether a specific consumer should be considered for topic partition assignment.
+func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	currentPartitions := currentAssignment[memberID]
+	currentAssignmentSize := len(currentPartitions)
+	maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
+	if currentAssignmentSize > maxAssignmentSize {
+		Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
+	}
+	if currentAssignmentSize < maxAssignmentSize {
+		// if a consumer is not assigned all its potential partitions it is subject to reassignment
+		return true
+	}
+	for _, partition := range currentPartitions {
+		if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			return true
+		}
+	}
+	return false
+}
+
+// Only consider reassigning those topic partitions that have two or more potential consumers.
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	return len(partition2AllPotentialConsumers[partition]) >= 2
+}
+
+// The assignment should improve the overall balance of the partition assignments to consumers.
+func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	for _, memberID := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
+			currentAssignment[memberID] = append(currentAssignment[memberID], partition)
+			currentPartitionConsumer[partition] = memberID
+			break
+		}
+	}
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
+func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
+	userDataV1 := &StickyAssignorUserDataV1{}
+	if err := decode(userDataBytes, userDataV1, nil); err != nil {
+		userDataV0 := &StickyAssignorUserDataV0{}
+		if err := decode(userDataBytes, userDataV0, nil); err != nil {
+			return nil, err
+		}
+		return userDataV0, nil
+	}
+	return userDataV1, nil
+}
+
+// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
+// to those topic partitions currently reported by the Kafka cluster.
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
+	assignments := deepCopyAssignment(currentAssignment)
+	for memberID, partitions := range assignments {
+		// perform in-place filtering
+		i := 0
+		for _, partition := range partitions {
+			if _, exists := partition2AllPotentialConsumers[partition]; exists {
+				partitions[i] = partition
+				i++
+			}
+		}
+		assignments[memberID] = partitions[:i]
+	}
+	return assignments
+}
+
+func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
+	for i, assignment := range assignments {
+		if assignment == topic {
+			return append(assignments[:i], assignments[i+1:]...)
+		}
+	}
+	return assignments
+}
+
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+	return slices.Contains(assignments, topic)
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unassignedPartitions[partition] = true
+	}
+
+	sortedPartitions := make([]topicPartitionAssignment, 0)
+	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics),
+		// then we simply list partitions in a round-robin fashion (from consumers with the
+		// most assigned partitions to those with the least)
+		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+		// use priority-queue to evaluate consumer group members in descending-order based on
+		// the number of topic partition assignments (i.e. consumers with most assignments first)
+		pq := make(assignmentPriorityQueue, len(assignments))
+		i := 0
+		for consumerID, consumerAssignments := range assignments {
+			pq[i] = &consumerGroupMember{
+				id:          consumerID,
+				assignments: consumerAssignments,
+			}
+			i++
+		}
+		heap.Init(&pq)
+
+		// loop until no consumer-group members remain
+		for pq.Len() != 0 {
+			member := pq[0]
+
+			// partitions that were assigned to a different consumer last time
+			var prevPartitionIndex int
+			for i, partition := range member.assignments {
+				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+					prevPartitionIndex = i
+					break
+				}
+			}
+
+			if len(member.assignments) > 0 {
+				partition := member.assignments[prevPartitionIndex]
+				sortedPartitions = append(sortedPartitions, partition)
+				delete(unassignedPartitions, partition)
+				if prevPartitionIndex == 0 {
+					member.assignments = member.assignments[1:]
+				} else {
+					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+				}
+				heap.Fix(&pq, 0)
+			} else {
+				heap.Pop(&pq)
+			}
+		}
+
+		for partition := range unassignedPartitions {
+			sortedPartitions = append(sortedPartitions, partition)
+		}
+	} else {
+		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
+		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+	}
+	return sortedPartitions
+}
+
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+	// sort the members by the number of partition assignments in ascending order
+	sortedMemberIDs := make([]string, 0, len(assignments))
+	for memberID := range assignments {
+		sortedMemberIDs = append(sortedMemberIDs, memberID)
+	}
+	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+		if ret == 0 {
+			return sortedMemberIDs[i] < sortedMemberIDs[j]
+		}
+		return ret < 0
+	})
+	return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the partitions by the number of potential consumers in ascending order
+	sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+	i := 0
+	for partition := range partition2AllPotentialConsumers {
+		sortedPartionIDs[i] = partition
+		i++
+	}
+	sort.Slice(sortedPartionIDs, func(i, j int) bool {
+		if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
+			ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
+			if ret == 0 {
+				return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
+			}
+			return ret < 0
+		}
+		return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
+	})
+	return sortedPartionIDs
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+	m := make(map[string][]topicPartitionAssignment, len(assignment))
+	for memberID, subscriptions := range assignment {
+		m[memberID] = append(subscriptions[:0:0], subscriptions...)
+	}
+	return m
+}
+
+func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
+	curMembers := make(map[string]int)
+	for _, cur := range partition2AllPotentialConsumers {
+		if len(curMembers) == 0 {
+			for _, curMembersElem := range cur {
+				curMembers[curMembersElem]++
+			}
+			continue
+		}
+
+		if len(curMembers) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[string]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curMembers {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+
+	curPartitions := make(map[topicPartitionAssignment]int)
+	for _, cur := range consumer2AllPotentialPartitions {
+		if len(curPartitions) == 0 {
+			for _, curPartitionElem := range cur {
+				curPartitions[curPartitionElem]++
+			}
+			continue
+		}
+
+		if len(curPartitions) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[topicPartitionAssignment]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curPartitions {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// We need to process subscriptions' user data with each consumer's reported generation in mind;
+// higher generations overwrite lower generations in case of a conflict.
+// Note that a conflict could exist only if user data is for different generations.
+func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
+	currentAssignment := make(map[string][]topicPartitionAssignment)
+	prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
+
+	// for each partition we create a sorted map of its consumers by generation
+	sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
+	for memberID, meta := range members {
+		consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, partition := range consumerUserData.partitions() {
+			if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
+				if consumerUserData.hasGeneration() {
+					if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
+						// same partition is assigned to two consumers during the same rebalance.
+						// log a warning and skip this record
+						Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
+						continue
+					} else {
+						consumers[consumerUserData.generation()] = memberID
+					}
+				} else {
+					consumers[defaultGeneration] = memberID
+				}
+			} else {
+				generation := defaultGeneration
+				if consumerUserData.hasGeneration() {
+					generation = consumerUserData.generation()
+				}
+				sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
+			}
+		}
+	}
+
+	// prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition
+	// current and previous consumers are the last two consumers of each partition in the above sorted map
+	for partition, consumers := range sortedPartitionConsumersByGeneration {
+		// sort consumers by generation in decreasing order
+		var generations []int
+		for generation := range consumers {
+			generations = append(generations, generation)
+		}
+		sort.Sort(sort.Reverse(sort.IntSlice(generations)))
+
+		consumer := consumers[generations[0]]
+		if _, exists := currentAssignment[consumer]; !exists {
+			currentAssignment[consumer] = []topicPartitionAssignment{partition}
+		} else {
+			currentAssignment[consumer] = append(currentAssignment[consumer], partition)
+		}
+
+		// check for previous assignment, if any
+		if len(generations) > 1 {
+			prevAssignment[partition] = consumerGenerationPair{
+				MemberID:   consumers[generations[1]],
+				Generation: generations[1],
+			}
+		}
+	}
+	return currentAssignment, prevAssignment, nil
+}
+
+type consumerGenerationPair struct {
+	MemberID   string
+	Generation int
+}
+
+// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
+type consumerPair struct {
+	SrcMemberID string
+	DstMemberID string
+}
+
+// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
+type partitionMovements struct {
+	PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
+	Movements                 map[topicPartitionAssignment]consumerPair
+}
+
+func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
+	pair := p.Movements[partition]
+	delete(p.Movements, partition)
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	delete(partitionMovementsForThisTopic[pair], partition)
+	if len(partitionMovementsForThisTopic[pair]) == 0 {
+		delete(partitionMovementsForThisTopic, pair)
+	}
+	if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
+		delete(p.PartitionMovementsByTopic, partition.Topic)
+	}
+	return pair
+}
+
+func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
+	p.Movements[partition] = pair
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	if _, exists := partitionMovementsForThisTopic[pair]; !exists {
+		partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic[pair][partition] = true
+}
+
+func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
+	pair := consumerPair{
+		SrcMemberID: oldConsumer,
+		DstMemberID: newConsumer,
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		existingPair := p.removeMovementRecordOfPartition(partition)
+		if existingPair.DstMemberID != oldConsumer {
+			Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
+		}
+		if existingPair.SrcMemberID != newConsumer {
+			// the partition is not moving back to its previous consumer
+			p.addPartitionMovementRecord(partition, consumerPair{
+				SrcMemberID: existingPair.SrcMemberID,
+				DstMemberID: newConsumer,
+			})
+		}
+	} else {
+		p.addPartitionMovementRecord(partition, pair)
+	}
+}
+
+func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		return partition
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		if oldConsumer != p.Movements[partition].DstMemberID {
+			Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
+		}
+		oldConsumer = p.Movements[partition].SrcMemberID
+	}
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	reversePair := consumerPair{
+		SrcMemberID: newConsumer,
+		DstMemberID: oldConsumer,
+	}
+	if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
+		return partition
+	}
+	var reversePairPartition topicPartitionAssignment
+	for otherPartition := range partitionMovementsForThisTopic[reversePair] {
+		reversePairPartition = otherPartition
+	}
+	return reversePairPartition
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
+	if src == dst {
+		return currentPath, false
+	}
+	if len(pairs) == 0 {
+		return currentPath, false
+	}
+	for _, pair := range pairs {
+		if src == pair.SrcMemberID && dst == pair.DstMemberID {
+			currentPath = append(currentPath, src, dst)
+			return currentPath, true
+		}
+	}
+
+	for _, pair := range pairs {
+		if pair.SrcMemberID != src {
+			continue
+		}
+		// create a deep copy of the pairs, excluding the current pair
+		reducedSet := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedSet[i] = p
+				i++
+			}
+		}
+
+		currentPath = append(currentPath, pair.SrcMemberID)
+		return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+	}
+	return currentPath, false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
+	superCycle := make([]string, len(cycle)-1)
+	for i := 0; i < len(cycle)-1; i++ {
+		superCycle[i] = cycle[i]
+	}
+	superCycle = append(superCycle, cycle...)
+	for _, foundCycle := range cycles {
+		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+			return true
+		}
+	}
+	return false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+	cycles := make([][]string, 0)
+	for _, pair := range pairs {
+		// create a deep copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedPairs[i] = p
+				i++
+			}
+		}
+		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+			if !p.in(path, cycles) {
+				cycles = append(cycles, path)
+				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+			}
+		}
+	}
+
+	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// the odds of finding a cycle among more than two consumers seem to be so low (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+	for _, cycle := range cycles {
+		if len(cycle) == 3 {
+			return true
+		}
+	}
+	return false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) isSticky() bool {
+	for topic, movements := range p.PartitionMovementsByTopic {
+		movementPairs := make([]consumerPair, len(movements))
+		i := 0
+		for pair := range movements {
+			movementPairs[i] = pair
+			i++
+		}
+		if p.hasCycles(movementPairs) {
+			Logger.Printf("Stickiness is violated for topic %s", topic)
+			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+			return false
+		}
+	}
+	return true
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func indexOfSubList(source []string, target []string) int {
+	targetSize := len(target)
+	maxCandidate := len(source) - targetSize
+nextCand:
+	for candidate := 0; candidate <= maxCandidate; candidate++ {
+		j := candidate
+		for i := 0; i < targetSize; i++ {
+			if target[i] != source[j] {
+				// Element mismatch, try next candidate
+				continue nextCand
+			}
+			j++
+		}
+		// All elements of candidate matched target
+		return candidate
+	}
+	return -1
+}
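+
+// Illustrative example for indexOfSubList: with source = ["a","b","c","b","c"]
+// and target = ["b","c"], the first matching window starts at index 1, so the
+// function returns 1; if target never appears in source, it returns -1.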
+
+type consumerGroupMember struct {
+	id          string
+	assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
+type assignmentPriorityQueue []*consumerGroupMember
+
+func (pq assignmentPriorityQueue) Len() int { return len(pq) }
+
+func (pq assignmentPriorityQueue) Less(i, j int) bool {
+	// order assignment priority queue in descending order using assignment-count/member-id
+	if len(pq[i].assignments) == len(pq[j].assignments) {
+		return pq[i].id > pq[j].id
+	}
+	return len(pq[i].assignments) > len(pq[j].assignments)
+}
+
+func (pq assignmentPriorityQueue) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *assignmentPriorityQueue) Push(x interface{}) {
+	member := x.(*consumerGroupMember)
+	*pq = append(*pq, member)
+}
+
+func (pq *assignmentPriorityQueue) Pop() interface{} {
+	old := *pq
+	n := len(old)
+	member := old[n-1]
+	*pq = old[0 : n-1]
+	return member
+}
diff --git a/vendor/github.com/IBM/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go
new file mode 100644
index 0000000..7c559cf
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/broker.go
@@ -0,0 +1,1869 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+	conf *Config
+	rack *string
+
+	id            int32
+	addr          string
+	correlationID int32
+	conn          net.Conn
+	connErr       error
+	lock          sync.Mutex
+	opened        atomic.Bool
+	responses     chan *responsePromise
+	done          chan bool
+
+	metricRegistry             metrics.Registry
+	incomingByteRate           metrics.Meter
+	requestRate                metrics.Meter
+	fetchRate                  metrics.Meter
+	requestSize                metrics.Histogram
+	requestLatency             metrics.Histogram
+	outgoingByteRate           metrics.Meter
+	responseRate               metrics.Meter
+	responseSize               metrics.Histogram
+	requestsInFlight           metrics.Counter
+	protocolRequestsRate       map[int16]metrics.Meter
+	brokerIncomingByteRate     metrics.Meter
+	brokerRequestRate          metrics.Meter
+	brokerFetchRate            metrics.Meter
+	brokerRequestSize          metrics.Histogram
+	brokerRequestLatency       metrics.Histogram
+	brokerOutgoingByteRate     metrics.Meter
+	brokerResponseRate         metrics.Meter
+	brokerResponseSize         metrics.Histogram
+	brokerRequestsInFlight     metrics.Counter
+	brokerThrottleTime         metrics.Histogram
+	brokerProtocolRequestsRate map[int16]metrics.Meter
+	brokerAPIVersions          apiVersionMap
+
+	kerberosAuthenticator               GSSAPIKerberosAuth
+	clientSessionReauthenticationTimeMs int64
+
+	throttleTimer     *time.Timer
+	throttleTimerLock sync.Mutex
+}
+
+// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
+type SASLMechanism string
+
+const (
+	// SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
+	SASLTypeOAuth = "OAUTHBEARER"
+	// SASLTypePlaintext represents the SASL/PLAIN mechanism
+	SASLTypePlaintext = "PLAIN"
+	// SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
+	SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
+	// SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
+	SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
+	SASLTypeGSSAPI      = "GSSAPI"
+	// SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
+	// server negotiate SASL auth using opaque packets.
+	SASLHandshakeV0 = int16(0)
+	// SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
+	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
+	SASLHandshakeV1 = int16(1)
+	// SASLExtKeyAuth is the reserved extension key name sent as part of the
+	// SASL/OAUTHBEARER initial client response
+	SASLExtKeyAuth = "auth"
+)
+
+// AccessToken contains an access token used to authenticate a
+// SASL/OAUTHBEARER client along with associated metadata.
+type AccessToken struct {
+	// Token is the access token payload.
+	Token string
+	// Extensions is an optional map of arbitrary key-value pairs that can be
+	// sent with the SASL/OAUTHBEARER initial client response. These values are
+	// ignored by the SASL server if they are unexpected. This feature is only
+	// supported by Kafka >= 2.1.0.
+	Extensions map[string]string
+}
+
+// AccessTokenProvider is the interface that encapsulates how implementors
+// can generate access tokens for Kafka broker authentication.
+type AccessTokenProvider interface {
+	// Token returns an access token. The implementation should ensure token
+	// reuse so that multiple calls at connect time do not create multiple
+	// tokens. The implementation should also periodically refresh the token in
+	// order to guarantee that each call returns an unexpired token.  This
+	// method should not block indefinitely--a timeout error should be returned
+	// after a short period of inactivity so that the broker connection logic
+	// can log debugging information and retry.
+	Token() (*AccessToken, error)
+}
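+
+// A minimal, illustrative AccessTokenProvider sketch (hypothetical caller code,
+// not part of this package; a production provider should cache and refresh its
+// token as described above):
+//
+//	type staticTokenProvider struct{ token string }
+//
+//	func (s staticTokenProvider) Token() (*AccessToken, error) {
+//		return &AccessToken{Token: s.token}, nil
+//	}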
+
+// SCRAMClient is an interface to a SCRAM
+// client implementation.
+type SCRAMClient interface {
+	// Begin prepares the client for the SCRAM exchange
+	// with the server with a user name and a password
+	Begin(userName, password, authzID string) error
+	// Step steps client through the SCRAM exchange. It is
+	// called repeatedly until it errors or `Done` returns true.
+	Step(challenge string) (response string, err error)
+	// Done should return true when the SCRAM conversation
+	// is over.
+	Done() bool
+}
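+
+// One common way to satisfy SCRAMClient (a sketch only, assuming the third-party
+// github.com/xdg-go/scram package; adapt the hash function and error handling as needed):
+//
+//	type xdgSCRAMClient struct {
+//		*scram.Client
+//		*scram.ClientConversation
+//	}
+//
+//	func (x *xdgSCRAMClient) Begin(userName, password, authzID string) (err error) {
+//		x.Client, err = scram.SHA512.NewClient(userName, password, authzID)
+//		if err != nil {
+//			return err
+//		}
+//		x.ClientConversation = x.Client.NewConversation()
+//		return nil
+//	}
+//
+//	func (x *xdgSCRAMClient) Step(challenge string) (string, error) { return x.ClientConversation.Step(challenge) }
+//	func (x *xdgSCRAMClient) Done() bool                            { return x.ClientConversation.Done() }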
+
+type responsePromise struct {
+	requestTime   time.Time
+	correlationID int32
+	response      protocolBody
+	handler       func([]byte, error)
+	packets       chan []byte
+	errors        chan error
+}
+
+func (p *responsePromise) handle(packets []byte, err error) {
+	// Use callback when provided
+	if p.handler != nil {
+		p.handler(packets, err)
+		return
+	}
+	// Otherwise fallback to using channels
+	if err != nil {
+		p.errors <- err
+		return
+	}
+	p.packets <- packets
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+	return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
+func (b *Broker) Open(conf *Config) error {
+	if !b.opened.CompareAndSwap(false, true) {
+		return ErrAlreadyConnected
+	}
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	err := conf.Validate()
+	if err != nil {
+		return err
+	}
+
+	b.lock.Lock()
+
+	if b.metricRegistry == nil {
+		b.metricRegistry = newCleanupRegistry(conf.MetricRegistry)
+	}
+
+	go withRecover(func() {
+		defer b.lock.Unlock()
+
+		dialer := conf.getDialer()
+		b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+		if b.connErr != nil {
+			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+			b.conn = nil
+			b.opened.Store(false)
+			return
+		}
+		if conf.Net.TLS.Enable {
+			b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config))
+		}
+
+		b.conn = newBufConn(b.conn)
+		b.conf = conf
+
+		// Create or reuse the global metrics shared between brokers
+		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", b.metricRegistry)
+		b.requestRate = metrics.GetOrRegisterMeter("request-rate", b.metricRegistry)
+		b.fetchRate = metrics.GetOrRegisterMeter("consumer-fetch-rate", b.metricRegistry)
+		b.requestSize = getOrRegisterHistogram("request-size", b.metricRegistry)
+		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", b.metricRegistry)
+		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", b.metricRegistry)
+		b.responseRate = metrics.GetOrRegisterMeter("response-rate", b.metricRegistry)
+		b.responseSize = getOrRegisterHistogram("response-size", b.metricRegistry)
+		b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", b.metricRegistry)
+		b.protocolRequestsRate = map[int16]metrics.Meter{}
+		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
+		// the same id (-1) and are already exposed through the global metrics above
+		if b.id >= 0 && !metrics.UseNilMetrics {
+			b.registerMetrics()
+		}
+
+		// Send an ApiVersionsRequest to identify the client (KIP-511).
+		// Store the response in the brokerAPIVersions map.
+		// It will be used to determine the supported API versions for each request.
+		// This should happen before SASL authentication: https://kafka.apache.org/26/protocol.html#api_versions
+		if conf.ApiVersionsRequest {
+			apiVersionsResponse, err := b.sendAndReceiveApiVersions(3)
+			if err != nil {
+				Logger.Printf("Error while sending ApiVersionsRequest V3 to broker %s: %s\n", b.addr, err)
+				// send a lower version request in case remote cluster is <= 2.4.0.0
+				maxVersion := int16(0)
+				if apiVersionsResponse != nil {
+					for _, k := range apiVersionsResponse.ApiKeys {
+						if k.ApiKey == apiKeyApiVersions {
+							maxVersion = k.MaxVersion
+							break
+						}
+					}
+				}
+				apiVersionsResponse, err = b.sendAndReceiveApiVersions(maxVersion)
+				if err != nil {
+					Logger.Printf("Error while sending ApiVersionsRequest V%d to broker %s: %s\n", maxVersion, b.addr, err)
+				}
+			}
+			if apiVersionsResponse != nil {
+				b.brokerAPIVersions = make(apiVersionMap, len(apiVersionsResponse.ApiKeys))
+				for _, key := range apiVersionsResponse.ApiKeys {
+					b.brokerAPIVersions[key.ApiKey] = &apiVersionRange{
+						minVersion: key.MinVersion,
+						maxVersion: key.MaxVersion,
+					}
+				}
+			}
+		}
+
+		if conf.Net.SASL.Mechanism == SASLTypeOAuth && conf.Net.SASL.Version == SASLHandshakeV0 {
+			conf.Net.SASL.Version = SASLHandshakeV1
+		}
+
+		useSaslV0 := conf.Net.SASL.Version == SASLHandshakeV0
+		if conf.Net.SASL.Enable && useSaslV0 {
+			b.connErr = b.authenticateViaSASLv0()
+
+			if b.connErr != nil {
+				err = b.conn.Close()
+				if err == nil {
+					DebugLogger.Printf("Closed connection to broker %s due to SASL v0 auth error: %s\n", b.addr, b.connErr)
+				} else {
+					Logger.Printf("Error while closing connection to broker %s (due to SASL v0 auth error: %s): %s\n", b.addr, b.connErr, err)
+				}
+				b.conn = nil
+				b.opened.Store(false)
+				return
+			}
+		}
+
+		b.done = make(chan bool)
+		b.responses = make(chan *responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+		go withRecover(b.responseReceiver)
+		if conf.Net.SASL.Enable && !useSaslV0 {
+			b.connErr = b.authenticateViaSASLv1()
+			if b.connErr != nil {
+				close(b.responses)
+				<-b.done
+				err = b.conn.Close()
+				if err == nil {
+					DebugLogger.Printf("Closed connection to broker %s due to SASL v1 auth error: %s\n", b.addr, b.connErr)
+				} else {
+					Logger.Printf("Error while closing connection to broker %s (due to SASL v1 auth error: %s): %s\n", b.addr, b.connErr, err)
+				}
+				b.conn = nil
+				b.opened.Store(false)
+				return
+			}
+		}
+		if b.id >= 0 {
+			DebugLogger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+		} else {
+			DebugLogger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+		}
+	})
+
+	return nil
+}
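+
+// A minimal "synchronous open" sketch (hypothetical caller code; the address is
+// an example only). Open returns immediately, so Connected is used to wait for
+// and surface the outcome of the background dial:
+//
+//	b := NewBroker("localhost:9092")
+//	if err := b.Open(NewConfig()); err != nil {
+//		// already connected or invalid configuration
+//	}
+//	if ok, err := b.Connected(); !ok {
+//		// the connection attempt failed; err holds the dial error
+//	}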
+
+func (b *Broker) ResponseSize() int {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return len(b.responses)
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return b.conn != nil, b.connErr
+}
+
+// TLSConnectionState returns the client's TLS connection state. The second return value is false if this is not a tls connection or the connection has not yet been established.
+func (b *Broker) TLSConnectionState() (state tls.ConnectionState, ok bool) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		return state, false
+	}
+	conn := b.conn
+	if bconn, ok := b.conn.(*bufConn); ok {
+		conn = bconn.Conn
+	}
+	if tc, ok := conn.(*tls.Conn); ok {
+		return tc.ConnectionState(), true
+	}
+	return state, false
+}
+
+// Close closes the broker resources
+func (b *Broker) Close() error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		return ErrNotConnected
+	}
+
+	close(b.responses)
+	<-b.done
+
+	err := b.conn.Close()
+
+	b.conn = nil
+	b.connErr = nil
+	b.done = nil
+	b.responses = nil
+
+	b.metricRegistry.UnregisterAll()
+
+	if err == nil {
+		DebugLogger.Printf("Closed connection to broker %s\n", b.addr)
+	} else {
+		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+	}
+	b.opened.Store(false)
+
+	return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+	return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+	return b.addr
+}
+
+// Rack returns the broker's rack as retrieved from Kafka's metadata or the
+// empty string if it is not known.  The returned value corresponds to the
+// broker's broker.rack configuration setting.  Requires protocol version to be
+// at least v0.10.0.0.
+func (b *Broker) Rack() string {
+	if b.rack == nil {
+		return ""
+	}
+	return *b.rack
+}
+
+// GetMetadata sends a metadata request and returns a metadata response or error
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+	response := new(MetadataResponse)
+	response.Version = request.Version // Required to ensure use of the correct response header version
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+	response := new(ConsumerMetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// FindCoordinator sends a find coordinator request and returns a response or error
+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+	response := new(FindCoordinatorResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// GetAvailableOffsets returns an offset response or error
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+	response := new(OffsetResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ProduceCallback function is called once the produce response has been parsed
+// or could not be read.
+type ProduceCallback func(*ProduceResponse, error)
+
+// AsyncProduce sends a produce request and eventually calls the provided callback
+// with a produce response or an error.
+//
+// Unlike Produce, waiting for the response is generally not blocking.
+// If the configured maximum number of in-flight requests is reached, then
+// the request will block until a previous response is received.
+//
+// When configured with RequiredAcks == NoResponse, the callback will not be invoked.
+// If an error is returned because the request could not be sent then the callback
+// will not be invoked either.
+//
+// Make sure not to Close the broker in the callback as it will lead to a deadlock.
+func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	needAcks := request.RequiredAcks != NoResponse
+	// Use a nil promise when no acks are required
+	var promise *responsePromise
+
+	if needAcks {
+		metricRegistry := b.metricRegistry
+
+		// Create ProduceResponse early to provide the header version
+		res := new(ProduceResponse)
+		promise = &responsePromise{
+			response: res,
+			// Packets will be converted to a ProduceResponse in the responseReceiver goroutine
+			handler: func(packets []byte, err error) {
+				if err != nil {
+					// Failed request
+					cb(nil, err)
+					return
+				}
+
+				if err := versionedDecode(packets, res, request.version(), metricRegistry); err != nil {
+					// Malformed response
+					cb(nil, err)
+					return
+				}
+
+				// Well-formed response
+				b.handleThrottledResponse(res)
+				cb(res, nil)
+			},
+		}
+	}
+
+	return b.sendWithPromise(request, promise)
+}
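+
+// Illustrative callback usage for AsyncProduce (hypothetical caller code; req is
+// assumed to be a prepared *ProduceRequest with RequiredAcks != NoResponse):
+//
+//	err := broker.AsyncProduce(req, func(res *ProduceResponse, err error) {
+//		if err != nil {
+//			Logger.Printf("produce failed: %v", err)
+//			return
+//		}
+//		// inspect res for per-partition results
+//	})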
+
+// Produce returns a produce response or error
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+	var (
+		response *ProduceResponse
+		err      error
+	)
+
+	if request.RequiredAcks == NoResponse {
+		err = b.sendAndReceive(request, nil)
+	} else {
+		response = new(ProduceResponse)
+		err = b.sendAndReceive(request, response)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// Fetch returns a FetchResponse or error
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+	defer func() {
+		if b.fetchRate != nil {
+			b.fetchRate.Mark(1)
+		}
+		if b.brokerFetchRate != nil {
+			b.brokerFetchRate.Mark(1)
+		}
+	}()
+
+	response := new(FetchResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CommitOffset returns an offset commit response or error
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+	response := new(OffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// FetchOffset returns an offset fetch response or error
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+	response := new(OffsetFetchResponse)
+	response.Version = request.Version // needed to handle the two header versions
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// JoinGroup returns a join group response or error
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+	response := new(JoinGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// SyncGroup returns a sync group response or error
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+	response := new(SyncGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// LeaveGroup returns a leave group response or error
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+	response := new(LeaveGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// Heartbeat returns a heartbeat response or error
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+	response := new(HeartbeatResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ListGroups returns a list groups response or error
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+	response := new(ListGroupsResponse)
+	response.Version = request.Version // Required to ensure use of the correct response header version
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeGroups returns a describe groups response or error
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+	response := new(DescribeGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ApiVersions returns an api versions response or error
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+	response := new(ApiVersionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreateTopics sends a create topics request and returns a create topics response or error
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	response := new(CreateTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteTopics sends a delete topics request and returns a delete topics response or error
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	response := new(DeleteTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreatePartitions sends a create partition request and returns create
+// partitions response or error
+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	response := new(CreatePartitionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AlterPartitionReassignments sends an alter partition reassignments request and
+// returns an alter partition reassignments response or error
+func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
+	response := new(AlterPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ListPartitionReassignments sends a list partition reassignments request and
+// returns a list partition reassignments response or error
+func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
+	response := new(ListPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ElectLeaders sends an elect leaders request and returns an elect leaders response or error
+func (b *Broker) ElectLeaders(request *ElectLeadersRequest) (*ElectLeadersResponse, error) {
+	response := new(ElectLeadersResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteRecords sends a request to delete records and returns a delete records
+// response or error
+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+	response := new(DeleteRecordsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeAcls sends a describe acl request and returns a response or error
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+	response := new(DescribeAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreateAcls sends a create acl request and returns a response or error
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+	response := new(CreateAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	errs := make([]error, 0)
+	for _, res := range response.AclCreationResponses {
+		if !errors.Is(res.Err, ErrNoError) {
+			errs = append(errs, res.Err)
+		}
+	}
+
+	if len(errs) > 0 {
+		return response, Wrap(ErrCreateACLs, errs...)
+	}
+
+	return response, nil
+}
+
+// DeleteAcls sends a delete acl request and returns a response or error
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+	response := new(DeleteAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// InitProducerID sends an init producer request and returns a response or error
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	response := new(InitProducerIDResponse)
+	response.Version = request.version()
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AddPartitionsToTxn sends a request to add partitions to a txn and returns
+// a response or error
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+	response := new(AddPartitionsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+// or error
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+	response := new(AddOffsetsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// EndTxn sends a request to end txn and returns a response or error
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+	response := new(EndTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// TxnOffsetCommit sends a request to commit transaction offsets and returns
+// a response or error
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+	response := new(TxnOffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeConfigs sends a request to describe config and returns a response or
+// error
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	response := new(DescribeConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AlterConfigs sends a request to alter config and returns a response or error
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	response := new(AlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// IncrementalAlterConfigs sends a request to incrementally alter config and returns a response or error
+func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
+	response := new(IncrementalAlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteGroups sends a request to delete groups and returns a response or error
+func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
+	response := new(DeleteGroupsResponse)
+
+	if err := b.sendAndReceive(request, response); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteOffsets sends a request to delete group offsets and returns a response or error
+func (b *Broker) DeleteOffsets(request *DeleteOffsetsRequest) (*DeleteOffsetsResponse, error) {
+	response := new(DeleteOffsetsResponse)
+
+	if err := b.sendAndReceive(request, response); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
+func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
+	response := new(DescribeLogDirsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeUserScramCredentials sends a request to get SCRAM users
+func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
+	res := new(DescribeUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
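+// AlterUserScramCredentials sends a request to alter SCRAM users and returns a response or error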
+func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
+	res := new(AlterUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// DescribeClientQuotas sends a request to get the broker's quotas
+func (b *Broker) DescribeClientQuotas(request *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) {
+	response := new(DescribeClientQuotasResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AlterClientQuotas sends a request to alter the broker's quotas
+func (b *Broker) AlterClientQuotas(request *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) {
+	response := new(AlterClientQuotasResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// readFull ensures the conn ReadDeadline has been set up before making a
+// call to io.ReadFull
+func (b *Broker) readFull(buf []byte) (n int, err error) {
+	if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
+		return 0, err
+	}
+
+	return io.ReadFull(b.conn, buf)
+}
+
+// write ensures the conn Deadline has been set up before making a
+// call to conn.Write
+func (b *Broker) write(buf []byte) (n int, err error) {
+	now := time.Now()
+	if err := b.conn.SetWriteDeadline(now.Add(b.conf.Net.WriteTimeout)); err != nil {
+		return 0, err
+	}
+	// TLS connections require both read and write deadlines to be set
+	// to avoid the handshake blocking indefinitely
+	// see https://github.com/golang/go/blob/go1.23.0/src/crypto/tls/conn.go#L1192-L1195
+	if b.conf.Net.TLS.Enable {
+		if err := b.conn.SetReadDeadline(now.Add(b.conf.Net.ReadTimeout)); err != nil {
+			return 0, err
+		}
+	}
+
+	return b.conn.Write(buf)
+}
+
+// b.lock must be held by caller
+//
+// a non-nil res results in a response promise being created
+func (b *Broker) send(req, res protocolBody) (*responsePromise, error) {
+	var promise *responsePromise
+	if res != nil {
+		// Packets or error will be sent to the following channels
+		// once the response is received
+		promise = makeResponsePromise(res)
+	}
+
+	if err := b.sendWithPromise(req, promise); err != nil {
+		return nil, err
+	}
+
+	return promise, nil
+}
+
+func makeResponsePromise(res protocolBody) *responsePromise {
+	promise := &responsePromise{
+		response: res,
+		packets:  make(chan []byte),
+		errors:   make(chan error),
+	}
+	return promise
+}
+
+// b.lock must be held by caller
+func (b *Broker) sendWithPromise(rb protocolBody, promise *responsePromise) error {
+	if b.conn == nil {
+		if b.connErr != nil {
+			return b.connErr
+		}
+		return ErrNotConnected
+	}
+
+	if b.clientSessionReauthenticationTimeMs > 0 && currentUnixMilli() > b.clientSessionReauthenticationTimeMs {
+		err := b.authenticateViaSASLv1()
+		if err != nil {
+			return err
+		}
+	}
+
+	return b.sendInternal(rb, promise)
+}
+
+// b.lock must be held by caller
+func (b *Broker) sendInternal(rb protocolBody, promise *responsePromise) error {
+	// try restricting API version to ranges advertised by the broker
+	if err := restrictApiVersion(rb, b.brokerAPIVersions); err != nil {
+		return err
+	}
+
+	// response versions must always match their corresponding request's
+	if promise != nil && promise.response != nil {
+		promise.response.setVersion(rb.version())
+	}
+
+	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+		return ErrUnsupportedVersion
+	}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.metricRegistry)
+	if err != nil {
+		return err
+	}
+
+	// check and wait if throttled
+	b.waitIfThrottled()
+
+	requestTime := time.Now()
+	// Will be decremented in responseReceiver (except error or request with NoResponse)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	b.updateProtocolMetrics(rb)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		return err
+	}
+	b.correlationID++
+
+	if promise == nil {
+		// Record request latency without the response
+		b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime))
+		return nil
+	}
+
+	promise.requestTime = requestTime
+	promise.correlationID = req.correlationID
+	b.responses <- promise
+
+	return nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	promise, err := b.send(req, res)
+	if err != nil {
+		return err
+	}
+
+	if promise == nil {
+		return nil
+	}
+
+	err = handleResponsePromise(req, res, promise, b.metricRegistry)
+	if err != nil {
+		return err
+	}
+	if res != nil {
+		b.handleThrottledResponse(res)
+	}
+	return nil
+}
+
+func handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise, metricRegistry metrics.Registry) error {
+	select {
+	case buf := <-promise.packets:
+		return versionedDecode(buf, res, req.version(), metricRegistry)
+	case err := <-promise.errors:
+		return err
+	}
+}
+
+func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
+	b.id, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	host, err := pd.getString()
+	if err != nil {
+		return err
+	}
+
+	port, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		b.rack, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
+	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+	if _, _, err := net.SplitHostPort(b.addr); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
+	host, portstr, err := net.SplitHostPort(b.addr)
+	if err != nil {
+		return err
+	}
+
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(b.id)
+
+	err = pe.putString(host)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(int32(port))
+
+	if version >= 1 {
+		err = pe.putNullableString(b.rack)
+		if err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *Broker) responseReceiver() {
+	var dead error
+
+	for promise := range b.responses {
+		if dead != nil {
+			// This was previously incremented in send() and
+			// we are not calling updateIncomingCommunicationMetrics()
+			b.addRequestInFlightMetrics(-1)
+			promise.handle(nil, dead)
+			continue
+		}
+
+		headerLength := getHeaderLength(promise.response.headerVersion())
+		header := make([]byte, headerLength)
+
+		bytesReadHeader, err := b.readFull(header)
+		requestLatency := time.Since(promise.requestTime)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			promise.handle(nil, err)
+			continue
+		}
+
+		decodedHeader := responseHeader{}
+		err = versionedDecode(header, &decodedHeader, promise.response.headerVersion(), b.metricRegistry)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			promise.handle(nil, err)
+			continue
+		}
+		if decodedHeader.correlationID != promise.correlationID {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			// TODO if decoded ID < cur ID, discard until we catch up
+			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+			dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", promise.correlationID, decodedHeader.correlationID)}
+			promise.handle(nil, dead)
+			continue
+		}
+
+		buf := make([]byte, decodedHeader.length-int32(headerLength)+4)
+		bytesReadBody, err := b.readFull(buf)
+		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+		if err != nil {
+			dead = err
+			promise.handle(nil, err)
+			continue
+		}
+
+		promise.handle(buf, nil)
+	}
+	close(b.done)
+}
+
+func getHeaderLength(headerVersion int16) int8 {
+	if headerVersion < 1 {
+		return 8
+	} else {
+		// the header contains an additional tagged field length (0); we don't support actual tags yet.
+		return 9
+	}
+}
+
+func (b *Broker) sendAndReceiveApiVersions(v int16) (*ApiVersionsResponse, error) {
+	rb := &ApiVersionsRequest{
+		Version:               v,
+		ClientSoftwareName:    defaultClientSoftwareName,
+		ClientSoftwareVersion: version(),
+	}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.metricRegistry)
+	if err != nil {
+		return nil, err
+	}
+
+	requestTime := time.Now()
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to send ApiVersionsRequest V%d to %s: %s\n", v, b.addr, err)
+		return nil, err
+	}
+	b.correlationID++
+
+	// Kafka protocol response structure:
+	// - Message length (4 bytes): Total length of the response excluding this field
+	// - ResponseHeader v0 (4 bytes): Contains correlation ID for request-response matching
+	header := make([]byte, 8)
+	_, err = b.readFull(header)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to read ApiVersionsResponse V%d header from %s: %s\n", v, b.addr, err)
+		return nil, err
+	}
+
+	length := binary.BigEndian.Uint32(header[:4])
+	// we're not using the correlation ID here, but it is part of the response header
+	// correlationID := binary.BigEndian.Uint32(header[4:])
+
+	payload := make([]byte, length-4)
+	n, err := b.readFull(payload)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to read ApiVersionsResponse V%d payload from %s: %s\n", v, b.addr, err)
+		return nil, err
+	}
+
+	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+	res := &ApiVersionsResponse{Version: rb.version()}
+	err = versionedDecode(payload, res, rb.version(), b.metricRegistry)
+	if err != nil {
+		Logger.Printf("Failed to parse ApiVersionsResponse V%d from %s: %s\n", v, b.addr, err)
+		return nil, err
+	}
+
+	kerr := KError(res.ErrorCode)
+	if kerr != ErrNoError {
+		return res, fmt.Errorf("Error in ApiVersionsResponse V%d from %s: %w", res.Version, b.addr, kerr)
+	}
+
+	DebugLogger.Printf("Completed ApiVersionsRequest V%d to %s. Broker supports %d APIs\n", v, b.addr, len(res.ApiKeys))
+	return res, nil
+}
+
+func (b *Broker) authenticateViaSASLv0() error {
+	switch b.conf.Net.SASL.Mechanism {
+	case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+		return b.sendAndReceiveSASLSCRAMv0()
+	case SASLTypeGSSAPI:
+		return b.sendAndReceiveKerberos()
+	default:
+		return b.sendAndReceiveSASLPlainAuthV0()
+	}
+}
+
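+// authenticateViaSASLv1 authenticates using SaslHandshake and SaslAuthenticate
+// requests wrapped in the Kafka protocol: it optionally performs the SASL
+// handshake first, then dispatches to the mechanism-specific flow (GSSAPI,
+// OAuth, SCRAM or PLAIN) through a shared send/receive helper.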
+func (b *Broker) authenticateViaSASLv1() error {
+	metricRegistry := b.metricRegistry
+	if b.conf.Net.SASL.Handshake {
+		handshakeRequest := &SaslHandshakeRequest{Mechanism: string(b.conf.Net.SASL.Mechanism), Version: b.conf.Net.SASL.Version}
+		handshakeResponse := new(SaslHandshakeResponse)
+		prom := makeResponsePromise(handshakeResponse)
+
+		handshakeErr := b.sendInternal(handshakeRequest, prom)
+		if handshakeErr != nil {
+			Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr)
+			return handshakeErr
+		}
+		handshakeErr = handleResponsePromise(handshakeRequest, handshakeResponse, prom, metricRegistry)
+		if handshakeErr != nil {
+			Logger.Printf("Error while handling SASL handshake response %s: %s\n", b.addr, handshakeErr)
+			return handshakeErr
+		}
+
+		if !errors.Is(handshakeResponse.Err, ErrNoError) {
+			return handshakeResponse.Err
+		}
+	}
+
+	authSendReceiver := func(authBytes []byte) (*SaslAuthenticateResponse, error) {
+		authenticateRequest := b.createSaslAuthenticateRequest(authBytes)
+		authenticateResponse := new(SaslAuthenticateResponse)
+		prom := makeResponsePromise(authenticateResponse)
+		authErr := b.sendInternal(authenticateRequest, prom)
+		if authErr != nil {
+			Logger.Printf("Error while performing SASL Auth %s\n", b.addr)
+			return nil, authErr
+		}
+		authErr = handleResponsePromise(authenticateRequest, authenticateResponse, prom, metricRegistry)
+		if authErr != nil {
+			Logger.Printf("Error while performing SASL Auth %s: %s\n", b.addr, authErr)
+			return nil, authErr
+		}
+
+		if !errors.Is(authenticateResponse.Err, ErrNoError) {
+			var err error = authenticateResponse.Err
+			if authenticateResponse.ErrorMessage != nil {
+				err = Wrap(authenticateResponse.Err, errors.New(*authenticateResponse.ErrorMessage))
+			}
+			return nil, err
+		}
+
+		b.computeSaslSessionLifetime(authenticateResponse)
+		return authenticateResponse, nil
+	}
+
+	switch b.conf.Net.SASL.Mechanism {
+	case SASLTypeGSSAPI:
+		b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+		if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+			b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+		}
+		return b.kerberosAuthenticator.AuthorizeV2(b, authSendReceiver)
+	case SASLTypeOAuth:
+		provider := b.conf.Net.SASL.TokenProvider
+		return b.sendAndReceiveSASLOAuth(authSendReceiver, provider)
+	case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+		return b.sendAndReceiveSASLSCRAMv1(authSendReceiver, b.conf.Net.SASL.SCRAMClientGeneratorFunc())
+	default:
+		return b.sendAndReceiveSASLPlainAuthV1(authSendReceiver)
+	}
+}
+
+func (b *Broker) sendAndReceiveKerberos() error {
+	b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+	if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+		b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+	}
+	return b.kerberosAuthenticator.Authorize(b)
+}
+
+func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
+	rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.metricRegistry)
+	if err != nil {
+		return err
+	}
+
+	requestTime := time.Now()
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+		return err
+	}
+	b.correlationID++
+
+	header := make([]byte, 8) // response header
+	_, err = b.readFull(header)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+		return err
+	}
+
+	length := binary.BigEndian.Uint32(header[:4])
+	payload := make([]byte, length-4)
+	n, err := b.readFull(payload)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+		return err
+	}
+
+	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+	res := &SaslHandshakeResponse{}
+
+	err = versionedDecode(payload, res, 0, b.metricRegistry)
+	if err != nil {
+		Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+		return err
+	}
+
+	if !errors.Is(res.Err, ErrNoError) {
+		Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+		return res.Err
+	}
+
+	DebugLogger.Print("Completed pre-auth SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
+	return nil
+}
+
+//
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+//   message   = [authzid] UTF8NUL authcid UTF8NUL passwd
+//   authcid   = 1*SAFE ; MUST accept up to 255 octets
+//   authzid   = 1*SAFE ; MUST accept up to 255 octets
+//   passwd    = 1*SAFE ; MUST accept up to 255 octets
+//   UTF8NUL   = %x00 ; UTF-8 encoded NUL character
+//
+//   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
+//                  ;; any UTF-8 encoded Unicode character except NUL
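+//
+//   For example (illustrative values only), user "alice" with password
+//   "secret" and an empty authzid would be encoded as the bytes
+//   "\x00alice\x00secret".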
+//
+//
+
+// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
+// sendAndReceiveSASLPlainAuthV0 performs the v0 SASL PLAIN auth flow, which is NOT wrapped in the Kafka protocol.
+//
+// With the SASL v0 handshake and auth:
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection.
+func (b *Broker) sendAndReceiveSASLPlainAuthV0() error {
+	// default to V0 to allow for backward compatibility when SASL is enabled
+	// but not the handshake
+	if b.conf.Net.SASL.Handshake {
+		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
+		if handshakeErr != nil {
+			Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr)
+			return handshakeErr
+		}
+	}
+
+	length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+	authBytes := make([]byte, length+4) // 4 byte length header + auth data
+	binary.BigEndian.PutUint32(authBytes, uint32(length))
+	copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)
+
+	requestTime := time.Now()
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytesWritten, err := b.write(authBytes)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	header := make([]byte, 4)
+	n, err := b.readFull(header)
+	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+	// If the credentials are valid, we would get a 4 byte response filled with null characters.
+	// Otherwise, the broker closes the connection and we get an EOF
+	if err != nil {
+		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	DebugLogger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+	return nil
+}
+
+// Kafka 1.x.x onward added a SaslAuthenticate request/response message which
+// wraps the SASL flow in the Kafka protocol, which allows for returning
+// meaningful errors on authentication failure.
+func (b *Broker) sendAndReceiveSASLPlainAuthV1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error)) error {
+	authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
+	_, err := authSendReceiver(authBytes)
+	return err
+}
+
+func currentUnixMilli() int64 {
+	return time.Now().UnixNano() / int64(time.Millisecond)
+}
+
+// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
+// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
+func (b *Broker) sendAndReceiveSASLOAuth(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), provider AccessTokenProvider) error {
+	token, err := provider.Token()
+	if err != nil {
+		return err
+	}
+
+	message, err := buildClientFirstMessage(token)
+	if err != nil {
+		return err
+	}
+
+	res, err := authSendReceiver(message)
+	if err != nil {
+		return err
+	}
+	isChallenge := len(res.SaslAuthBytes) > 0
+
+	if isChallenge {
+		// Abort the token exchange. The broker returns the failure code.
+		_, err = authSendReceiver([]byte(`\x01`))
+	}
+	return err
+}
+
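+// sendAndReceiveSASLSCRAMv0 performs the SCRAM exchange using the legacy v0
+// framing: each client message is written as a 4 byte big-endian length header
+// followed by the raw SCRAM payload, and each server reply is read back the
+// same way, until the SCRAM conversation completes.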
+func (b *Broker) sendAndReceiveSASLSCRAMv0() error {
+	if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV0); err != nil {
+		return err
+	}
+
+	scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
+	if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+		return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err)
+	}
+
+	msg, err := scramClient.Step("")
+	if err != nil {
+		return fmt.Errorf("failed to advance the SCRAM exchange: %w", err)
+	}
+
+	for !scramClient.Done() {
+		requestTime := time.Now()
+		// Will be decremented in updateIncomingCommunicationMetrics (except error)
+		b.addRequestInFlightMetrics(1)
+		length := len(msg)
+		authBytes := make([]byte, length+4) // 4 byte length header + auth data
+		binary.BigEndian.PutUint32(authBytes, uint32(length))
+		copy(authBytes[4:], msg)
+		_, err := b.write(authBytes)
+		b.updateOutgoingCommunicationMetrics(length + 4)
+		if err != nil {
+			b.addRequestInFlightMetrics(-1)
+			Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+		b.correlationID++
+		header := make([]byte, 4)
+		_, err = b.readFull(header)
+		if err != nil {
+			b.addRequestInFlightMetrics(-1)
+			Logger.Printf("Failed to read response header while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+		payload := make([]byte, int32(binary.BigEndian.Uint32(header)))
+		n, err := b.readFull(payload)
+		if err != nil {
+			b.addRequestInFlightMetrics(-1)
+			Logger.Printf("Failed to read response payload while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+		b.updateIncomingCommunicationMetrics(n+4, time.Since(requestTime))
+		msg, err = scramClient.Step(string(payload))
+		if err != nil {
+			Logger.Println("SASL authentication failed", err)
+			return err
+		}
+	}
+
+	DebugLogger.Println("SASL authentication succeeded")
+	return nil
+}
+
+func (b *Broker) sendAndReceiveSASLSCRAMv1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), scramClient SCRAMClient) error {
+	if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+		return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err)
+	}
+
+	msg, err := scramClient.Step("")
+	if err != nil {
+		return fmt.Errorf("failed to advance the SCRAM exchange: %w", err)
+	}
+
+	for !scramClient.Done() {
+		res, err := authSendReceiver([]byte(msg))
+		if err != nil {
+			return err
+		}
+
+		msg, err = scramClient.Step(string(res.SaslAuthBytes))
+		if err != nil {
+			Logger.Println("SASL authentication failed", err)
+			return err
+		}
+	}
+
+	DebugLogger.Println("SASL authentication succeeded")
+
+	return nil
+}
+
+func (b *Broker) createSaslAuthenticateRequest(msg []byte) *SaslAuthenticateRequest {
+	authenticateRequest := SaslAuthenticateRequest{SaslAuthBytes: msg}
+	if b.conf.Version.IsAtLeast(V2_2_0_0) {
+		authenticateRequest.Version = 1
+	}
+
+	return &authenticateRequest
+}
+
+// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
+// https://tools.ietf.org/html/rfc7628
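+//
+// For example (illustrative token value), a token of "abc" with no extensions
+// produces the message "n,,\x01auth=Bearer abc\x01\x01".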
+func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
+	var ext string
+
+	if token == nil {
+		return []byte{}, fmt.Errorf("failed to build client first message: token is nil")
+	}
+
+	if len(token.Extensions) > 0 {
+		if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
+			return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
+		}
+		ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
+	}
+
+	resp := fmt.Appendf(nil, "n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)
+
+	return resp, nil
+}
+
+// mapToString returns a list of key-value pairs ordered by key.
+// keyValSep separates the key from the value. elemSep separates each pair.
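+//
+// For example, mapToString(map[string]string{"b": "2", "a": "1"}, "=", ",")
+// returns "a=1,b=2".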
+func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
+	buf := make([]string, 0, len(extensions))
+
+	for k, v := range extensions {
+		buf = append(buf, k+keyValSep+v)
+	}
+
+	sort.Strings(buf)
+
+	return strings.Join(buf, elemSep)
+}
+
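+// computeSaslSessionLifetime schedules the next SASL re-authentication based
+// on the broker-provided session lifetime. For example (illustrative numbers),
+// a SessionLifetimeMs of 60000 schedules re-authentication between 51000ms
+// (85%) and 57000ms (95%) after the current authentication completed.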
+func (b *Broker) computeSaslSessionLifetime(res *SaslAuthenticateResponse) {
+	if res.SessionLifetimeMs > 0 {
+		// Follows the Java Kafka implementation from SaslClientAuthenticator.ReauthInfo#setAuthenticationEndAndSessionReauthenticationTimes
+		// pick a random percentage between 85% and 95% for session re-authentication
+		positiveSessionLifetimeMs := res.SessionLifetimeMs
+		authenticationEndMs := currentUnixMilli()
+		pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount := 0.85
+		pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously := 0.10
+		pctToUse := pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + rand.Float64()*pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously
+		sessionLifetimeMsToUse := int64(float64(positiveSessionLifetimeMs) * pctToUse)
+		DebugLogger.Printf("Session expiration in %d ms and session re-authentication on or after %d ms", positiveSessionLifetimeMs, sessionLifetimeMsToUse)
+		b.clientSessionReauthenticationTimeMs = authenticationEndMs + sessionLifetimeMsToUse
+	} else {
+		b.clientSessionReauthenticationTimeMs = 0
+	}
+}
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+	b.updateRequestLatencyAndInFlightMetrics(requestLatency)
+	b.responseRate.Mark(1)
+
+	if b.brokerResponseRate != nil {
+		b.brokerResponseRate.Mark(1)
+	}
+
+	responseSize := int64(bytes)
+	b.incomingByteRate.Mark(responseSize)
+	if b.brokerIncomingByteRate != nil {
+		b.brokerIncomingByteRate.Mark(responseSize)
+	}
+
+	b.responseSize.Update(responseSize)
+	if b.brokerResponseSize != nil {
+		b.brokerResponseSize.Update(responseSize)
+	}
+}
+
+func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) {
+	requestLatencyInMs := int64(requestLatency / time.Millisecond)
+	b.requestLatency.Update(requestLatencyInMs)
+
+	if b.brokerRequestLatency != nil {
+		b.brokerRequestLatency.Update(requestLatencyInMs)
+	}
+
+	b.addRequestInFlightMetrics(-1)
+}
+
+func (b *Broker) addRequestInFlightMetrics(i int64) {
+	b.requestsInFlight.Inc(i)
+	if b.brokerRequestsInFlight != nil {
+		b.brokerRequestsInFlight.Inc(i)
+	}
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+	b.requestRate.Mark(1)
+	if b.brokerRequestRate != nil {
+		b.brokerRequestRate.Mark(1)
+	}
+
+	requestSize := int64(bytes)
+	b.outgoingByteRate.Mark(requestSize)
+	if b.brokerOutgoingByteRate != nil {
+		b.brokerOutgoingByteRate.Mark(requestSize)
+	}
+
+	b.requestSize.Update(requestSize)
+	if b.brokerRequestSize != nil {
+		b.brokerRequestSize.Update(requestSize)
+	}
+}
+
+func (b *Broker) updateProtocolMetrics(rb protocolBody) {
+	protocolRequestsRate := b.protocolRequestsRate[rb.key()]
+	if protocolRequestsRate == nil {
+		protocolRequestsRate = metrics.GetOrRegisterMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()), b.metricRegistry)
+		b.protocolRequestsRate[rb.key()] = protocolRequestsRate
+	}
+	protocolRequestsRate.Mark(1)
+
+	if b.brokerProtocolRequestsRate != nil {
+		brokerProtocolRequestsRate := b.brokerProtocolRequestsRate[rb.key()]
+		if brokerProtocolRequestsRate == nil {
+			brokerProtocolRequestsRate = b.registerMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()))
+			b.brokerProtocolRequestsRate[rb.key()] = brokerProtocolRequestsRate
+		}
+		brokerProtocolRequestsRate.Mark(1)
+	}
+}
+
+type throttleSupport interface {
+	throttleTime() time.Duration
+}
+
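+// handleThrottledResponse records broker-side throttling: if the response
+// carries a non-zero throttle time, a timer is armed so that the next request
+// sent on this broker (see waitIfThrottled) waits for the throttle window to
+// elapse.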
+func (b *Broker) handleThrottledResponse(resp protocolBody) {
+	throttledResponse, ok := resp.(throttleSupport)
+	if !ok {
+		return
+	}
+	throttleTime := throttledResponse.throttleTime()
+	if throttleTime == time.Duration(0) {
+		return
+	}
+	DebugLogger.Printf(
+		"broker/%d %T throttled %v\n", b.ID(), resp, throttleTime)
+	b.setThrottle(throttleTime)
+	b.updateThrottleMetric(throttleTime)
+}
+
+func (b *Broker) setThrottle(throttleTime time.Duration) {
+	b.throttleTimerLock.Lock()
+	defer b.throttleTimerLock.Unlock()
+	if b.throttleTimer != nil {
+		// if there is an existing timer stop/clear it
+		if !b.throttleTimer.Stop() {
+			<-b.throttleTimer.C
+		}
+	}
+	b.throttleTimer = time.NewTimer(throttleTime)
+}
+
+func (b *Broker) waitIfThrottled() {
+	b.throttleTimerLock.Lock()
+	defer b.throttleTimerLock.Unlock()
+	if b.throttleTimer != nil {
+		DebugLogger.Printf("broker/%d waiting for throttle timer\n", b.ID())
+		<-b.throttleTimer.C
+		b.throttleTimer = nil
+	}
+}
+
+func (b *Broker) updateThrottleMetric(throttleTime time.Duration) {
+	if b.brokerThrottleTime != nil {
+		throttleTimeInMs := int64(throttleTime / time.Millisecond)
+		b.brokerThrottleTime.Update(throttleTimeInMs)
+	}
+}
+
+func (b *Broker) registerMetrics() {
+	b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
+	b.brokerRequestRate = b.registerMeter("request-rate")
+	b.brokerFetchRate = b.registerMeter("consumer-fetch-rate")
+	b.brokerRequestSize = b.registerHistogram("request-size")
+	b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
+	b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
+	b.brokerResponseRate = b.registerMeter("response-rate")
+	b.brokerResponseSize = b.registerHistogram("response-size")
+	b.brokerRequestsInFlight = b.registerCounter("requests-in-flight")
+	b.brokerThrottleTime = b.registerHistogram("throttle-time-in-ms")
+	b.brokerProtocolRequestsRate = map[int16]metrics.Meter{}
+}
+
+func (b *Broker) registerMeter(name string) metrics.Meter {
+	nameForBroker := getMetricNameForBroker(name, b)
+	return metrics.GetOrRegisterMeter(nameForBroker, b.metricRegistry)
+}
+
+func (b *Broker) registerHistogram(name string) metrics.Histogram {
+	nameForBroker := getMetricNameForBroker(name, b)
+	return getOrRegisterHistogram(nameForBroker, b.metricRegistry)
+}
+
+func (b *Broker) registerCounter(name string) metrics.Counter {
+	nameForBroker := getMetricNameForBroker(name, b)
+	return metrics.GetOrRegisterCounter(nameForBroker, b.metricRegistry)
+}
+
+func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		cfg = &tls.Config{
+			MinVersion: tls.VersionTLS12,
+		}
+	}
+	if cfg.ServerName != "" {
+		return cfg
+	}
+
+	c := cfg.Clone()
+	sn, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		Logger.Println(fmt.Errorf("failed to get ServerName from addr: %w", err))
+	}
+	c.ServerName = sn
+	return c
+}
diff --git a/vendor/github.com/IBM/sarama/client.go b/vendor/github.com/IBM/sarama/client.go
new file mode 100644
index 0000000..0dc29e2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/client.go
@@ -0,0 +1,1291 @@
+package sarama
+
+import (
+	"context"
+	"errors"
+	"math"
+	"math/rand"
+	"net"
+	"slices"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/proxy"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users; however, Kafka will process requests from a single client strictly serially,
+// so it is generally more efficient to use the default of one client per producer/consumer.
+type Client interface {
+	// Config returns the Config struct of the client. This struct should not be
+	// altered after it has been created.
+	Config() *Config
+
+	// Controller returns the cluster controller broker. It will return a
+	// locally cached value if it's available. You can call RefreshController
+	// to update the cached value. Requires Kafka 0.10 or higher.
+	Controller() (*Broker, error)
+
+	// RefreshController retrieves the cluster controller from fresh metadata
+	// and stores it in the local cache. Requires Kafka 0.10 or higher.
+	RefreshController() (*Broker, error)
+
+	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
+	Brokers() []*Broker
+
+	// Broker returns the active Broker if available for the broker ID.
+	Broker(brokerID int32) (*Broker, error)
+
+	// Topics returns the set of available topics as retrieved from cluster metadata.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	Partitions(topic string) ([]int32, error)
+
+	// WritablePartitions returns the sorted list of all writable partition IDs for
+	// the given topic, where "writable" means "having a valid leader accepting
+	// writes".
+	WritablePartitions(topic string) ([]int32, error)
+
+	// Leader returns the broker object that is the leader of the current
+	// topic/partition, as determined by querying the cluster metadata.
+	Leader(topic string, partitionID int32) (*Broker, error)
+
+	// LeaderAndEpoch returns the leader and its epoch for the current
+	// topic/partition, as determined by querying the cluster metadata.
+	LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error)
+
+	// Replicas returns the set of all replica IDs for the given partition.
+	Replicas(topic string, partitionID int32) ([]int32, error)
+
+	// InSyncReplicas returns the set of all in-sync replica IDs for the given
+	// partition. In-sync replicas are replicas which are fully caught up with
+	// the partition leader.
+	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+	// OfflineReplicas returns the set of all offline replica IDs for the given
+	// partition. Offline replicas are replicas whose hosting broker is currently offline.
+	OfflineReplicas(topic string, partitionID int32) ([]int32, error)
+
+	// RefreshBrokers takes a list of addresses to be used as seed brokers.
+	// Existing broker connections are closed and the updated list of seed brokers
+	// will be used for the next metadata fetch.
+	RefreshBrokers(addrs []string) error
+
+	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
+	// available metadata for those topics. If no topics are provided, it will refresh
+	// metadata for all topics.
+	RefreshMetadata(topics ...string) error
+
+	// GetOffset queries the cluster to get the most recent available offset at the
+	// given time (in milliseconds) on the topic/partition combination.
+	// Time should be OffsetOldest for the earliest available offset,
+	// OffsetNewest for the offset of the message that will be produced next, or a time.
+	GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+	// Coordinator returns the coordinating broker for a consumer group. It will
+	// return a locally cached value if it's available. You can call
+	// RefreshCoordinator to update the cached value. This function only works on
+	// Kafka 0.8.2 and higher.
+	Coordinator(consumerGroup string) (*Broker, error)
+
+	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+	// in local cache. This function only works on Kafka 0.8.2 and higher.
+	RefreshCoordinator(consumerGroup string) error
+
+	// TransactionCoordinator returns the coordinating broker for a transaction id. It will
+	// return a locally cached value if it's available. You can call
+	// RefreshCoordinator to update the cached value. This function only works on
+	// Kafka 0.11.0.0 and higher.
+	TransactionCoordinator(transactionID string) (*Broker, error)
+
+	// RefreshTransactionCoordinator retrieves the coordinator for a transaction id and stores it
+	// in local cache. This function only works on Kafka 0.11.0.0 and higher.
+	RefreshTransactionCoordinator(transactionID string) error
+
+	// InitProducerID retrieves information required for Idempotent Producer
+	InitProducerID() (*InitProducerIDResponse, error)
+
+	// LeastLoadedBroker retrieves the broker that has the fewest responses pending
+	LeastLoadedBroker() *Broker
+
+	// PartitionNotReadable reports whether the given partition is not readable
+	PartitionNotReadable(topic string, partition int32) bool
+
+	// Close shuts down all broker connections managed by this client. It is required
+	// to call this function before a client object passes out of scope, as it will
+	// otherwise leak memory. You must close any Producers or Consumers using a client
+	// before you close the client.
+	Close() error
+
+	// Closed returns true if the client has already had Close called on it
+	Closed() bool
+}
+
+const (
+	// OffsetNewest stands for the log head offset, i.e. the offset that will be
+	// assigned to the next message that will be produced to the partition. You
+	// can send this to a client's GetOffset method to get this offset, or when
+	// calling ConsumePartition to start consuming new messages.
+	OffsetNewest int64 = -1
+	// OffsetOldest stands for the oldest offset available on the broker for a
+	// partition. You can send this to a client's GetOffset method to get this
+	// offset, or when calling ConsumePartition to start consuming from the
+	// oldest offset that is still available on the broker.
+	OffsetOldest int64 = -2
+)
+
+type client struct {
+	// updateMetadataMs stores the time at which metadata was last updated.
+	// Note: this is accessed atomically so it must be the first word in the struct
+	// as per golang/go#41970
+	updateMetadataMs atomic.Int64
+
+	conf           *Config
+	closer, closed chan none // for shutting down background metadata updater
+
+	// the broker addresses given to us through the constructor are not guaranteed to be returned in
+	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+	// so we store them separately
+	seedBrokers []*Broker
+	deadSeeds   []*Broker
+
+	controllerID            int32                                   // cluster controller broker id
+	brokers                 map[int32]*Broker                       // maps broker ids to brokers
+	metadata                map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+	metadataTopics          map[string]none                         // topics that need to collect metadata
+	coordinators            map[string]int32                        // Maps consumer group names to coordinating broker IDs
+	transactionCoordinators map[string]int32                        // Maps transaction ids to coordinating broker IDs
+
+	// If the number of partitions is large, we can get some churn calling cachedPartitions,
+	// so the result is cached. It is important to update this value whenever metadata is changed.
+	cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+	lock sync.RWMutex // protects access to the maps that hold cluster state.
+
+	metadataRefresh metadataRefresh
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
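+//
+// A minimal usage sketch (assumes a broker reachable at "localhost:9092"; the
+// address is illustrative only):
+//
+//	conf := NewConfig()
+//	client, err := NewClient([]string{"localhost:9092"}, conf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer client.Close()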
+func NewClient(addrs []string, conf *Config) (Client, error) {
+	DebugLogger.Println("Initializing new client")
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	if err := conf.Validate(); err != nil {
+		return nil, err
+	}
+
+	if len(addrs) < 1 {
+		return nil, ConfigurationError("You must provide at least one broker address")
+	}
+
+	if strings.Contains(addrs[0], ".servicebus.windows.net") {
+		if conf.Version.IsAtLeast(V1_1_0_0) || !conf.Version.IsAtLeast(V0_11_0_0) {
+			Logger.Println("Connecting to Azure Event Hubs, forcing version to V1_0_0_0 for compatibility")
+			conf.Version = V1_0_0_0
+		}
+	}
+	client := &client{
+		conf:                    conf,
+		closer:                  make(chan none),
+		closed:                  make(chan none),
+		brokers:                 make(map[int32]*Broker),
+		metadata:                make(map[string]map[int32]*PartitionMetadata),
+		metadataTopics:          make(map[string]none),
+		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+		coordinators:            make(map[string]int32),
+		transactionCoordinators: make(map[string]int32),
+	}
+	refresh := func(topics []string) error {
+		deadline := time.Time{}
+		if client.conf.Metadata.Timeout > 0 {
+			deadline = time.Now().Add(client.conf.Metadata.Timeout)
+		}
+		return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
+	}
+	if conf.Metadata.SingleFlight {
+		client.metadataRefresh = newSingleFlightRefresher(refresh)
+	} else {
+		client.metadataRefresh = refresh
+	}
+
+	if conf.Net.ResolveCanonicalBootstrapServers {
+		var err error
+		addrs, err = client.resolveCanonicalNames(addrs)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	client.randomizeSeedBrokers(addrs)
+
+	if conf.Metadata.Full {
+		// do an initial fetch of all cluster metadata by specifying an empty list of topics
+		err := client.RefreshMetadata()
+		if err == nil {
+		} else if errors.Is(err, ErrLeaderNotAvailable) || errors.Is(err, ErrReplicaNotAvailable) || errors.Is(err, ErrTopicAuthorizationFailed) || errors.Is(err, ErrClusterAuthorizationFailed) {
+			// indicates that maybe part of the cluster is down, but is not fatal to creating the client
+			Logger.Println(err)
+		} else {
+			close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+			_ = client.Close()
+			return nil, err
+		}
+	}
+	go withRecover(client.backgroundMetadataUpdater)
+
+	DebugLogger.Println("Successfully initialized new client")
+
+	return client, nil
+}
+
+func (client *client) Config() *Config {
+	return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	brokers := make([]*Broker, 0, len(client.brokers))
+	for _, broker := range client.brokers {
+		brokers = append(brokers, broker)
+	}
+	return brokers
+}
+
+func (client *client) Broker(brokerID int32) (*Broker, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	broker, ok := client.brokers[brokerID]
+	if !ok {
+		return nil, ErrBrokerNotFound
+	}
+	_ = broker.Open(client.conf)
+	return broker, nil
+}
+
+func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
+	// FIXME: this InitProducerID seems to only be called from client_test.go (TestInitProducerIDConnectionRefused) and has been superseded by transaction_manager.go?
+	brokerErrors := make([]error, 0)
+	for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() {
+		request := &InitProducerIDRequest{}
+
+		if client.conf.Version.IsAtLeast(V2_7_0_0) {
+			// Version 4 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 4
+		} else if client.conf.Version.IsAtLeast(V2_5_0_0) {
+			// Version 3 adds ProducerId and ProducerEpoch, allowing producers to try to resume after an INVALID_PRODUCER_EPOCH error
+			request.Version = 3
+		} else if client.conf.Version.IsAtLeast(V2_4_0_0) {
+			// Version 2 is the first flexible version.
+			request.Version = 2
+		} else if client.conf.Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+
+		response, err := broker.InitProducerID(request)
+		if err == nil {
+			return response, nil
+		} else {
+			// some error, remove that broker and try again
+			Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
+			_ = broker.Close()
+			brokerErrors = append(brokerErrors, err)
+			client.deregisterBroker(broker)
+		}
+	}
+
+	return nil, Wrap(ErrOutOfBrokers, brokerErrors...)
+}
+
+func (client *client) Close() error {
+	if client.Closed() {
+		// Chances are this is being called from a defer() and the error will go unobserved
+		// so we go ahead and log the event in this case.
+		Logger.Printf("Close() called on already closed client")
+		return ErrClosedClient
+	}
+
+	// shutdown and wait for the background thread before we take the lock, to avoid races
+	close(client.closer)
+	<-client.closed
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	DebugLogger.Println("Closing Client")
+
+	for _, broker := range client.brokers {
+		safeAsyncClose(broker)
+	}
+
+	for _, broker := range client.seedBrokers {
+		safeAsyncClose(broker)
+	}
+
+	client.brokers = nil
+	client.metadata = nil
+	client.metadataTopics = nil
+
+	return nil
+}
+
+func (client *client) Closed() bool {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadata))
+	for topic := range client.metadata {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) MetadataTopics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadataTopics))
+	for topic := range client.metadataTopics {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+	return client.getPartitions(topic, allPartitions)
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+	return client.getPartitions(topic, writablePartitions)
+}
+
+func (client *client) getPartitions(topic string, pt partitionType) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, pt)
+
+	// len==0 catches when it's nil (no such topic) and the odd case when every single
+	// partition is undergoing leader election simultaneously. Callers have to be able to handle
+	// this function returning an empty slice (which is a valid return value) but catching it
+	// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
+	// a metadata refresh as a nicety so callers can just try again and don't have to manually
+	// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, pt)
+	}
+
+	if partitions == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+	return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+		return metadata.Replicas
+	})
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+	return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+		return metadata.Isr
+	})
+}
+
+func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
+	return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+		return metadata.OfflineReplicas
+	})
+}
+
+func (client *client) getReplicas(topic string, partitionID int32, extractor func(metadata *PartitionMetadata) []int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	replicas := extractor(metadata)
+	if errors.Is(metadata.Err, ErrReplicaNotAvailable) {
+		return dupInt32Slice(replicas), metadata.Err
+	}
+	return dupInt32Slice(replicas), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+	leader, _, err := client.LeaderAndEpoch(topic, partitionID)
+	return leader, err
+}
+
+func (client *client) LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) {
+	if client.Closed() {
+		return nil, -1, ErrClosedClient
+	}
+
+	leader, epoch, err := client.cachedLeader(topic, partitionID)
+	if leader == nil {
+		err = client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, -1, err
+		}
+		leader, epoch, err = client.cachedLeader(topic, partitionID)
+	}
+
+	return leader, epoch, err
+}
+
+func (client *client) RefreshBrokers(addrs []string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	for _, broker := range client.brokers {
+		safeAsyncClose(broker)
+	}
+	client.brokers = make(map[int32]*Broker)
+
+	for _, broker := range client.seedBrokers {
+		safeAsyncClose(broker)
+	}
+
+	for _, broker := range client.deadSeeds {
+		safeAsyncClose(broker)
+	}
+
+	client.seedBrokers = nil
+	client.deadSeeds = nil
+
+	client.randomizeSeedBrokers(addrs)
+
+	return nil
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	// Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than returning a proper
+	// error. This handles that case by returning an error instead of sending the request
+	// off to Kafka. See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310
+	if slices.Contains(topics, "") {
+		return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+	}
+	return client.metadataRefresh(topics)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, timestamp int64) (int64, error) {
+	if client.Closed() {
+		return -1, ErrClosedClient
+	}
+
+	offset, err := client.getOffset(topic, partitionID, timestamp)
+	if err != nil {
+		if err := client.RefreshMetadata(topic); err != nil {
+			return -1, err
+		}
+		return client.getOffset(topic, partitionID, timestamp)
+	}
+
+	return offset, err
+}
+
+func (client *client) Controller() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	if !client.conf.Version.IsAtLeast(V0_10_0_0) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		if err := client.refreshMetadata(); err != nil {
+			return nil, err
+		}
+		controller = client.cachedController()
+	}
+
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+// deregisterController removes the cached controllerID
+func (client *client) deregisterController() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	if controller, ok := client.brokers[client.controllerID]; ok {
+		_ = controller.Close()
+		delete(client.brokers, client.controllerID)
+	}
+}
+
+// RefreshController retrieves the cluster controller from fresh metadata
+// and stores it in the local cache. Requires Kafka 0.10 or higher.
+func (client *client) RefreshController() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.deregisterController()
+
+	if err := client.refreshMetadata(); err != nil {
+		return nil, err
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedCoordinator(consumerGroup)
+
+	if coordinator == nil {
+		if err := client.RefreshCoordinator(consumerGroup); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedCoordinator(consumerGroup)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.findCoordinator(consumerGroup, CoordinatorGroup, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.coordinators[consumerGroup] = response.Coordinator.ID()
+	return nil
+}
+
+func (client *client) TransactionCoordinator(transactionID string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedTransactionCoordinator(transactionID)
+
+	if coordinator == nil {
+		if err := client.RefreshTransactionCoordinator(transactionID); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedTransactionCoordinator(transactionID)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
+
+func (client *client) RefreshTransactionCoordinator(transactionID string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.findCoordinator(transactionID, CoordinatorTransaction, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.transactionCoordinators[transactionID] = response.Coordinator.ID()
+	return nil
+}
+
+// private broker management helpers
+
+func (client *client) randomizeSeedBrokers(addrs []string) {
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for _, index := range random.Perm(len(addrs)) {
+		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+	}
+}
+
+func (client *client) updateBroker(brokers []*Broker) {
+	currentBroker := make(map[int32]*Broker, len(brokers))
+
+	for _, broker := range brokers {
+		currentBroker[broker.ID()] = broker
+		if client.brokers[broker.ID()] == nil { // add new broker
+			client.brokers[broker.ID()] = broker
+			DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+		} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
+			safeAsyncClose(client.brokers[broker.ID()])
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+
+	for id, broker := range client.brokers {
+		if _, exist := currentBroker[id]; !exist { // remove old broker
+			safeAsyncClose(broker)
+			delete(client.brokers, id)
+			Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+}
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map, replacing any existing entry whose address has gone stale.
+// You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+	if client.brokers == nil {
+		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
+		return
+	}
+
+	if client.brokers[broker.ID()] == nil {
+		client.brokers[broker.ID()] = broker
+		DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+		safeAsyncClose(client.brokers[broker.ID()])
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+	}
+}
+
+// deregisterBroker removes a broker from the broker list; if it is not in the
+// broker list but is the current seed broker, it is moved to the dead seeds.
+func (client *client) deregisterBroker(broker *Broker) {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	_, ok := client.brokers[broker.ID()]
+	if ok {
+		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+		delete(client.brokers, broker.ID())
+		return
+	}
+	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+		client.deadSeeds = append(client.deadSeeds, broker)
+		client.seedBrokers = client.seedBrokers[1:]
+	}
+}
+
+func (client *client) resurrectDeadBrokers() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+	client.deadSeeds = nil
+}
+
+// LeastLoadedBroker returns the broker with the least pending requests.
+// It first chooses from the cached broker list; if that list is empty, it falls back to the seed brokers.
+func (client *client) LeastLoadedBroker() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	var leastLoadedBroker *Broker
+	pendingRequests := math.MaxInt
+	for _, broker := range client.brokers {
+		if pendingRequests > broker.ResponseSize() {
+			pendingRequests = broker.ResponseSize()
+			leastLoadedBroker = broker
+		}
+	}
+	if leastLoadedBroker != nil {
+		_ = leastLoadedBroker.Open(client.conf)
+		return leastLoadedBroker
+	}
+
+	if len(client.seedBrokers) > 0 {
+		_ = client.seedBrokers[0].Open(client.conf)
+		return client.seedBrokers[0]
+	}
+
+	return leastLoadedBroker
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+	allPartitions partitionType = iota
+	writablePartitions
+	// If you add any more types, update the partition cache in update()
+
+	// Ensure this is the last partition type value
+	maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		return partitions[partitionID]
+	}
+
+	return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions, exists := client.cachedPartitionsResults[topic]
+
+	if !exists {
+		return nil
+	}
+	return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+	partitions := client.metadata[topic]
+
+	if partitions == nil {
+		return nil
+	}
+
+	ret := make([]int32, 0, len(partitions))
+	for _, partition := range partitions {
+		if partitionSet == writablePartitions && errors.Is(partition.Err, ErrLeaderNotAvailable) {
+			continue
+		}
+		ret = append(ret, partition.ID)
+	}
+
+	sort.Sort(int32Slice(ret))
+	return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, int32, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		metadata, ok := partitions[partitionID]
+		if ok {
+			if errors.Is(metadata.Err, ErrLeaderNotAvailable) {
+				return nil, -1, ErrLeaderNotAvailable
+			}
+			b := client.brokers[metadata.Leader]
+			if b == nil {
+				return nil, -1, ErrLeaderNotAvailable
+			}
+			_ = b.Open(client.conf)
+			return b, metadata.LeaderEpoch, nil
+		}
+	}
+
+	return nil, -1, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, timestamp int64) (int64, error) {
+	broker, err := client.Leader(topic, partitionID)
+	if err != nil {
+		return -1, err
+	}
+
+	request := NewOffsetRequest(client.conf.Version)
+	request.AddBlock(topic, partitionID, timestamp, 1)
+
+	response, err := broker.GetAvailableOffsets(request)
+	if err != nil {
+		_ = broker.Close()
+		return -1, err
+	}
+
+	block := response.GetBlock(topic, partitionID)
+	if block == nil {
+		_ = broker.Close()
+		return -1, ErrIncompleteResponse
+	}
+	if !errors.Is(block.Err, ErrNoError) {
+		return -1, block.Err
+	}
+	if len(block.Offsets) != 1 {
+		return -1, ErrOffsetOutOfRange
+	}
+
+	return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+	defer close(client.closed)
+
+	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+		return
+	}
+
+	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := client.refreshMetadata(); err != nil {
+				Logger.Println("Client background metadata update:", err)
+			}
+		case <-client.closer:
+			return
+		}
+	}
+}
+
+func (client *client) refreshMetadata() error {
+	var topics []string
+
+	if !client.conf.Metadata.Full {
+		if specificTopics, err := client.MetadataTopics(); err != nil {
+			return err
+		} else if len(specificTopics) == 0 {
+			return ErrNoTopicsToUpdateMetadata
+		} else {
+			topics = specificTopics
+		}
+	}
+
+	if err := client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
+	pastDeadline := func(backoff time.Duration) bool {
+		if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
+			// we are past the deadline
+			return true
+		}
+		return false
+	}
+	retry := func(err error) error {
+		if attemptsRemaining > 0 {
+			backoff := client.computeBackoff(attemptsRemaining)
+			if pastDeadline(backoff) {
+				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
+				return err
+			}
+			if backoff > 0 {
+				time.Sleep(backoff)
+			}
+
+			t := client.updateMetadataMs.Load()
+			if time.Since(time.UnixMilli(t)) < backoff {
+				return err
+			}
+			attemptsRemaining--
+			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+
+			return client.tryRefreshMetadata(topics, attemptsRemaining, deadline)
+		}
+		return err
+	}
+
+	broker := client.LeastLoadedBroker()
+	brokerErrors := make([]error, 0)
+	for ; broker != nil && !pastDeadline(0); broker = client.LeastLoadedBroker() {
+		allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation
+		if len(topics) > 0 {
+			DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+		} else {
+			allowAutoTopicCreation = false
+			DebugLogger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+		}
+
+		req := NewMetadataRequest(client.conf.Version, topics)
+		req.AllowAutoTopicCreation = allowAutoTopicCreation
+		client.updateMetadataMs.Store(time.Now().UnixMilli())
+
+		response, err := broker.GetMetadata(req)
+		var kerror KError
+		var packetEncodingError PacketEncodingError
+		if err == nil {
+			// When talking to a broker that is still starting up, it is possible to receive an empty metadata set. We should remove that broker and try the next one (https://issues.apache.org/jira/browse/KAFKA-7924).
+			if len(response.Brokers) == 0 {
+				Logger.Printf("client/metadata receiving empty brokers from the metadata response when requesting the broker #%d at %s", broker.ID(), broker.addr)
+				_ = broker.Close()
+				client.deregisterBroker(broker)
+				continue
+			}
+			allKnownMetaData := len(topics) == 0
+			// valid response, use it
+			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
+			if shouldRetry {
+				Logger.Println("client/metadata found some partitions to be leaderless")
+				return retry(err) // note: err can be nil
+			}
+			return err
+		} else if errors.As(err, &packetEncodingError) {
+			// didn't even send, return the error
+			return err
+		} else if errors.As(err, &kerror) {
+			// if it is a SASL auth error, return it immediately, as this _should_ be non-retryable for all brokers
+			if errors.Is(err, ErrSASLAuthenticationFailed) {
+				Logger.Println("client/metadata failed SASL authentication")
+				return err
+			}
+
+			if errors.Is(err, ErrTopicAuthorizationFailed) {
+				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
+				return err
+			}
+			// else remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		} else {
+			// some other error, remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			brokerErrors = append(brokerErrors, err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		}
+	}
+
+	wrappedErr := Wrap(ErrOutOfBrokers, brokerErrors...)
+	if broker != nil {
+		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+		return retry(wrappedErr)
+	}
+
+	Logger.Println("client/metadata no available broker to send metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(wrappedErr)
+}
+
+// if no fatal error occurred, the retry flag reports whether some partitions returned ErrLeaderNotAvailable and the metadata should be refreshed again
+func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+	if client.Closed() {
+		return
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	// For all the brokers we received:
+	// - if it is a new ID, save it
+	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	// - if a broker is no longer present in the response, remove the old broker
+	// - otherwise ignore it, replacing our existing one would just bounce the connection
+	client.updateBroker(data.Brokers)
+
+	client.controllerID = data.ControllerID
+
+	if allKnownMetaData {
+		client.metadata = make(map[string]map[int32]*PartitionMetadata)
+		client.metadataTopics = make(map[string]none)
+		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
+	}
+	for _, topic := range data.Topics {
+		// topics must be added to `metadataTopics` first, to guarantee that all
+		// requested topics are recorded and remain trackable for the periodic
+		// metadata refresh.
+		if _, exists := client.metadataTopics[topic.Name]; !exists {
+			client.metadataTopics[topic.Name] = none{}
+		}
+		delete(client.metadata, topic.Name)
+		delete(client.cachedPartitionsResults, topic.Name)
+
+		switch topic.Err {
+		case ErrNoError:
+			// no-op
+		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+			err = topic.Err
+			continue
+		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+			err = topic.Err
+			retry = true
+			continue
+		case ErrLeaderNotAvailable: // retry, but store partial partition results
+			retry = true
+		default: // don't retry, don't store partial results
+			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+			err = topic.Err
+			continue
+		}
+
+		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+		for _, partition := range topic.Partitions {
+			client.metadata[topic.Name][partition.ID] = partition
+			if errors.Is(partition.Err, ErrLeaderNotAvailable) {
+				retry = true
+			}
+		}
+
+		var partitionCache [maxPartitionIndex][]int32
+		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+		client.cachedPartitionsResults[topic.Name] = partitionCache
+	}
+
+	return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) cachedTransactionCoordinator(transactionID string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.transactionCoordinators[transactionID]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) cachedController() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers[client.controllerID]
+}
+
+func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
+	if client.conf.Metadata.Retry.BackoffFunc != nil {
+		maxRetries := client.conf.Metadata.Retry.Max
+		retries := maxRetries - attemptsRemaining
+		return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
+	}
+	return client.conf.Metadata.Retry.Backoff
+}
+
+func (client *client) findCoordinator(coordinatorKey string, coordinatorType CoordinatorType, attemptsRemaining int) (*FindCoordinatorResponse, error) {
+	retry := func(err error) (*FindCoordinatorResponse, error) {
+		if attemptsRemaining > 0 {
+			backoff := client.computeBackoff(attemptsRemaining)
+			attemptsRemaining--
+			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(backoff)
+			return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining)
+		}
+		return nil, err
+	}
+
+	brokerErrors := make([]error, 0)
+	for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() {
+		DebugLogger.Printf("client/coordinator requesting coordinator for %s from %s\n", coordinatorKey, broker.Addr())
+
+		request := new(FindCoordinatorRequest)
+		request.CoordinatorKey = coordinatorKey
+		request.CoordinatorType = coordinatorType
+
+		// Version 1 adds KeyType.
+		if client.conf.Version.IsAtLeast(V0_11_0_0) {
+			request.Version = 1
+		}
+		// Version 2 is the same as version 1.
+		if client.conf.Version.IsAtLeast(V2_0_0_0) {
+			request.Version = 2
+		}
+
+		response, err := broker.FindCoordinator(request)
+		if err != nil {
+			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+			var packetEncodingError PacketEncodingError
+			if errors.As(err, &packetEncodingError) {
+				return nil, err
+			} else {
+				_ = broker.Close()
+				brokerErrors = append(brokerErrors, err)
+				client.deregisterBroker(broker)
+				continue
+			}
+		}
+
+		if errors.Is(response.Err, ErrNoError) {
+			DebugLogger.Printf("client/coordinator coordinator for %s is #%d (%s)\n", coordinatorKey, response.Coordinator.ID(), response.Coordinator.Addr())
+			return response, nil
+		} else if errors.Is(response.Err, ErrConsumerCoordinatorNotAvailable) {
+			Logger.Printf("client/coordinator coordinator for %s is not available\n", coordinatorKey)
+
+			// This is very ugly, but this scenario will only happen once per cluster.
+			// The __consumer_offsets topic only has to be created one time.
+			// The number of partitions is not configurable, but partition 0 should always exist.
+			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+				time.Sleep(2 * time.Second)
+			}
+			if coordinatorType == CoordinatorTransaction {
+				if _, err := client.Leader("__transaction_state", 0); err != nil {
+					Logger.Printf("client/coordinator the __transaction_state topic is not initialized completely yet. Waiting 2 seconds...\n")
+					time.Sleep(2 * time.Second)
+				}
+			}
+
+			return retry(ErrConsumerCoordinatorNotAvailable)
+		} else if errors.Is(response.Err, ErrGroupAuthorizationFailed) {
+			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", coordinatorKey)
+			return retry(ErrGroupAuthorizationFailed)
+		} else {
+			return nil, response.Err
+		}
+	}
+
+	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(Wrap(ErrOutOfBrokers, brokerErrors...))
+}
+
+func (client *client) resolveCanonicalNames(addrs []string) ([]string, error) {
+	ctx := context.Background()
+
+	dialer := client.Config().getDialer()
+	resolver := net.Resolver{
+		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
+			// dial func should only be called once, so switching within is acceptable
+			switch d := dialer.(type) {
+			case proxy.ContextDialer:
+				return d.DialContext(ctx, network, address)
+			default:
+				// we have no choice but to ignore the context
+				return d.Dial(network, address)
+			}
+		},
+	}
+
+	canonicalAddrs := make(map[string]struct{}, len(addrs)) // dedupe as we go
+	for _, addr := range addrs {
+		host, port, err := net.SplitHostPort(addr)
+		if err != nil {
+			return nil, err // message includes addr
+		}
+
+		ips, err := resolver.LookupHost(ctx, host)
+		if err != nil {
+			return nil, err // message includes host
+		}
+		for _, ip := range ips {
+			ptrs, err := resolver.LookupAddr(ctx, ip)
+			if err != nil {
+				return nil, err // message includes ip
+			}
+
+			// unlike the Java client, we do not further check that PTRs resolve
+			ptr := strings.TrimSuffix(ptrs[0], ".") // trailing dot breaks GSSAPI
+			canonicalAddrs[net.JoinHostPort(ptr, port)] = struct{}{}
+		}
+	}
+
+	addrs = make([]string, 0, len(canonicalAddrs))
+	for addr := range canonicalAddrs {
+		addrs = append(addrs, addr)
+	}
+	return addrs, nil
+}
+
+// nopCloserClient embeds an existing Client, but disables
+// the Close method (all other methods pass
+// through unchanged). This is for use in larger structs
+// where it is undesirable to close the client that was
+// passed in by the caller.
+type nopCloserClient struct {
+	Client
+}
+
+// Close intercepts and purposely does not call the underlying
+// client's Close() method.
+func (ncc *nopCloserClient) Close() error {
+	return nil
+}
+
+func (client *client) PartitionNotReadable(topic string, partition int32) bool {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	pm := client.metadata[topic][partition]
+	if pm == nil {
+		return true
+	}
+	return pm.Leader == -1
+}
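
For orientation, a minimal sketch of how this client is typically driven through sarama's exported API (the NewClient constructor and the Client interface implemented above); the broker address, topic handling and error handling are illustrative placeholders, not part of the vendored file:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// NewClient dials the seed brokers and starts the background metadata updater.
	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	topics, err := client.Topics()
	if err != nil {
		log.Fatal(err)
	}
	for _, topic := range topics {
		// Partitions transparently triggers a metadata refresh when the cache is empty.
		partitions, err := client.Partitions(topic)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("topic %s has partitions %v", topic, partitions)
	}
}
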
diff --git a/vendor/github.com/IBM/sarama/compress.go b/vendor/github.com/IBM/sarama/compress.go
new file mode 100644
index 0000000..b752cb8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/compress.go
@@ -0,0 +1,191 @@
+package sarama
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	snappy "github.com/eapache/go-xerial-snappy"
+	"github.com/klauspost/compress/gzip"
+	"github.com/pierrec/lz4/v4"
+)
+
+var (
+	lz4WriterPool = sync.Pool{
+		New: func() interface{} {
+			lz := lz4.NewWriter(nil)
+			if err := lz.Apply(lz4.BlockSizeOption(lz4.Block64Kb)); err != nil {
+				panic(err)
+			}
+			return lz
+		},
+	}
+
+	gzipWriterPool = sync.Pool{
+		New: func() interface{} {
+			return gzip.NewWriter(nil)
+		},
+	}
+	gzipWriterPoolForCompressionLevel1 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 1)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel2 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 2)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel3 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 3)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel4 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 4)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel5 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 5)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel6 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 6)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel7 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 7)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel8 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 8)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel9 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 9)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+)
+
+func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		return gzipCompress(level, data)
+	case CompressionSnappy:
+		return snappy.Encode(data), nil
+	case CompressionLZ4:
+		return lz4Compress(data)
+	case CompressionZSTD:
+		return zstdCompress(ZstdEncoderParams{level}, nil, data)
+	default:
+		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
+	}
+}
+
+func gzipCompress(level int, data []byte) ([]byte, error) {
+	var (
+		buf    bytes.Buffer
+		writer *gzip.Writer
+		pool   *sync.Pool
+	)
+
+	switch level {
+	case CompressionLevelDefault:
+		pool = &gzipWriterPool
+	case 1:
+		pool = &gzipWriterPoolForCompressionLevel1
+	case 2:
+		pool = &gzipWriterPoolForCompressionLevel2
+	case 3:
+		pool = &gzipWriterPoolForCompressionLevel3
+	case 4:
+		pool = &gzipWriterPoolForCompressionLevel4
+	case 5:
+		pool = &gzipWriterPoolForCompressionLevel5
+	case 6:
+		pool = &gzipWriterPoolForCompressionLevel6
+	case 7:
+		pool = &gzipWriterPoolForCompressionLevel7
+	case 8:
+		pool = &gzipWriterPoolForCompressionLevel8
+	case 9:
+		pool = &gzipWriterPoolForCompressionLevel9
+	default:
+		var err error
+		writer, err = gzip.NewWriterLevel(&buf, level)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if pool != nil {
+		writer = pool.Get().(*gzip.Writer)
+		writer.Reset(&buf)
+		defer pool.Put(writer)
+	}
+	if _, err := writer.Write(data); err != nil {
+		return nil, err
+	}
+	if err := writer.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func lz4Compress(data []byte) ([]byte, error) {
+	writer := lz4WriterPool.Get().(*lz4.Writer)
+	defer lz4WriterPool.Put(writer)
+
+	var buf bytes.Buffer
+	writer.Reset(&buf)
+	if _, err := writer.Write(data); err != nil {
+		return nil, err
+	}
+	if err := writer.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
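
As context for how these codec helpers are exercised, here is a minimal sketch of producer configuration that routes record batches through the gzip path above using the pooled level-6 writer; the broker address and topic are illustrative placeholders, not part of the vendored file:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Compression = sarama.CompressionGZIP // batches go through gzipCompress
	cfg.Producer.CompressionLevel = 6                 // selects the level-6 gzip writer pool
	cfg.Producer.Return.Successes = true              // required by the SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{Topic: "example", Value: sarama.StringEncoder("hello")}
	if _, _, err := producer.SendMessage(msg); err != nil {
		log.Fatal(err)
	}
}
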
diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go
new file mode 100644
index 0000000..5bac2b5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/config.go
@@ -0,0 +1,917 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+	"regexp"
+	"time"
+
+	"github.com/klauspost/compress/gzip"
+	"github.com/rcrowley/go-metrics"
+	"golang.org/x/net/proxy"
+)
+
+const defaultClientID = "sarama"
+
+// validClientID specifies the permitted characters for a client.id when
+// connecting to Kafka versions before 1.0.0 (KIP-190)
+var validClientID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
+	Admin struct {
+		Retry struct {
+			// The total number of times to retry sending (retriable) admin requests (default 5).
+			// Similar to the `retries` setting of the JVM AdminClientConfig.
+			Max int
+			// Backoff time between retries of a failed request (default 100ms)
+			Backoff time.Duration
+		}
+		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
+		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
+		Timeout time.Duration
+	}
+
+	// Net is the namespace for network-level properties used by the Broker, and
+	// shared by the Client/Producer/Consumer.
+	Net struct {
+		// How many outstanding requests a connection is allowed to have before
+		// sending on it blocks (default 5).
+		// Throughput can improve but message ordering is not guaranteed if Producer.Idempotent is disabled, see:
+		// https://kafka.apache.org/protocol#protocol_network
+		// https://kafka.apache.org/28/documentation.html#producerconfigs_max.in.flight.requests.per.connection
+		MaxOpenRequests int
+
+		// All three of the below configurations are similar to the
+		// `socket.timeout.ms` setting in JVM kafka. All of them default
+		// to 30 seconds.
+		DialTimeout  time.Duration // How long to wait for the initial connection.
+		ReadTimeout  time.Duration // How long to wait for a response.
+		WriteTimeout time.Duration // How long to wait for a transmit.
+
+		// ResolveCanonicalBootstrapServers turns each bootstrap broker address
+		// into a set of IPs, then does a reverse lookup on each one to get its
+		// canonical hostname. This list of hostnames then replaces the
+		// original address list. Similar to the `client.dns.lookup` option in
+		// the JVM client, this is especially useful with GSSAPI, where it
+		// allows providing an alias record instead of individual broker
+		// hostnames. Defaults to false.
+		ResolveCanonicalBootstrapServers bool
+
+		TLS struct {
+			// Whether or not to use TLS when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The TLS configuration to use for secure connections if
+			// enabled (defaults to nil).
+			Config *tls.Config
+		}
+
+		// SASL based authentication with broker. Several SASL mechanisms are
+		// supported (PLAIN, SCRAM, OAUTHBEARER and GSSAPI); see the fields below.
+		SASL struct {
+			// Whether or not to use SASL authentication when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// SASLMechanism is the name of the enabled SASL mechanism.
+			// Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
+			Mechanism SASLMechanism
+			// Version is the SASL Protocol Version to use
+			// Kafka > 1.x should use V1, except on Azure EventHub which uses V0
+			Version int16
+			// Whether or not to send the Kafka SASL handshake first if enabled
+			// (defaults to true). You should only set this to false if you're using
+			// a non-Kafka SASL proxy.
+			Handshake bool
+			// AuthIdentity is an (optional) authorization identity (authzid) to
+			// use for SASL/PLAIN authentication (if different from User) when
+			// an authenticated user is permitted to act as the presented
+			// alternative user. See RFC4616 for details.
+			AuthIdentity string
+			// User is the authentication identity (authcid) to present for
+			// SASL/PLAIN or SASL/SCRAM authentication
+			User string
+			// Password for SASL/PLAIN authentication
+			Password string
+			// authz id used for SASL/SCRAM authentication
+			SCRAMAuthzID string
+			// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
+			// client used to perform the SCRAM exchange with the server.
+			SCRAMClientGeneratorFunc func() SCRAMClient
+			// TokenProvider is a user-defined callback for generating
+			// access tokens for SASL/OAUTHBEARER auth. See the
+			// AccessTokenProvider interface docs for proper implementation
+			// guidelines.
+			TokenProvider AccessTokenProvider
+
+			GSSAPI GSSAPIConfig
+		}
+
+		// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
+		// If zero or positive, keep-alives are enabled.
+		// If negative, keep-alives are disabled.
+		KeepAlive time.Duration
+
+		// LocalAddr is the local address to use when dialing an
+		// address. The address must be of a compatible type for the
+		// network being dialed.
+		// If nil, a local address is automatically chosen.
+		LocalAddr net.Addr
+
+		Proxy struct {
+			// Whether or not to use proxy when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The proxy dialer to use when enabled (defaults to nil).
+			Dialer proxy.Dialer
+		}
+	}
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata struct {
+		Retry struct {
+			// The total number of times to retry a metadata request when the
+			// cluster is in the middle of a leader election (default 3).
+			Max int
+			// How long to wait for leader election to occur before retrying
+			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries, maxRetries int) time.Duration
+		}
+		// How frequently to refresh the cluster metadata in the background.
+		// Defaults to 10 minutes. Set to 0 to disable. Similar to
+		// `topic.metadata.refresh.interval.ms` in the JVM version.
+		RefreshFrequency time.Duration
+
+		// Whether to maintain a full set of metadata for all topics, or just
+		// the minimal set that has been necessary so far. The full set is simpler
+		// and usually more convenient, but can take up a substantial amount of
+		// memory if you have many topics and partitions. Defaults to true.
+		Full bool
+
+		// How long to wait for a successful metadata response.
+		// Disabled by default which means a metadata request against an unreachable
+		// cluster (all brokers are unreachable or unresponsive) can take up to
+		// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
+		// to fail.
+		Timeout time.Duration
+
+		// Whether to allow automatic topic creation during metadata refresh. If set to true,
+		// the broker may auto-create topics that we requested which do not already exist,
+		// if it is configured to do so (`auto.create.topics.enable` is true). Defaults to true.
+		AllowAutoTopicCreation bool
+
+		// SingleFlight controls whether to send a single metadata refresh request at a given time
+		// or whether to allow anyone to refresh the metadata concurrently.
+		// If this is set to true and the client needs to refresh the metadata from different goroutines,
+		// the requests will be batched together so that a single refresh is sent at a time.
+		// See https://github.com/IBM/sarama/issues/3224 for more details.
+		// SingleFlight defaults to true.
+		SingleFlight bool
+	}
+
+	// Producer is the namespace for configuration related to producing messages,
+	// used by the Producer.
+	Producer struct {
+		// The maximum permitted size of a message (defaults to 1024 * 1024). Should be
+		// set equal to or smaller than the broker's `message.max.bytes`.
+		MaxMessageBytes int
+		// The level of acknowledgement reliability needed from the broker (defaults
+		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+		// JVM producer.
+		RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number of
+		// RequiredAcks (defaults to 10 seconds). This is only relevant when
+		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
+		// millisecond resolution, nanoseconds will be truncated. Equivalent to
+		// the JVM producer's `request.timeout.ms` setting.
+		Timeout time.Duration
+		// The type of compression to use on messages (defaults to no compression).
+		// Similar to `compression.codec` setting of the JVM producer.
+		Compression CompressionCodec
+		// The level of compression to use on messages. The meaning depends
+		// on the actual compression type used and defaults to default compression
+		// level for the codec.
+		CompressionLevel int
+		// Generates partitioners for choosing the partition to send messages to
+		// (defaults to hashing the message key). Similar to the `partitioner.class`
+		// setting for the JVM producer.
+		Partitioner PartitionerConstructor
+		// If enabled, the producer will ensure that exactly one copy of each message is
+		// written.
+		Idempotent bool
+		// Transaction is the namespace for transactional producer configuration
+		Transaction struct {
+			// Used in transactions to identify an instance of a producer through restarts
+			ID string
+			// Amount of time a transaction can remain unresolved (neither committed nor aborted)
+			// default is 1 min
+			Timeout time.Duration
+
+			Retry struct {
+				// The total number of times to retry sending a message (default 50).
+				// Similar to the `message.send.max.retries` setting of the JVM producer.
+				Max int
+				// How long to wait for the cluster to settle between retries
+				// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+				// JVM producer.
+				Backoff time.Duration
+				// Called to compute backoff time dynamically. Useful for implementing
+				// more sophisticated backoff strategies. This takes precedence over
+				// `Backoff` if set.
+				BackoffFunc func(retries, maxRetries int) time.Duration
+			}
+		}
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock. If,
+		// however, this config is used to create a `SyncProducer`, both must be set
+		// to true and you shall not read from the channels since the producer does
+		// this internally.
+		Return struct {
+			// If enabled, successfully delivered messages will be returned on the
+			// Successes channel (default disabled).
+			Successes bool
+
+			// If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including error (default enabled).
+			Errors bool
+		}
+
+		// The following config options control how often messages are batched up and
+		// sent to the broker. By default, messages are sent as fast as possible, and
+		// all messages received while the current batch is in-flight are placed
+		// into the subsequent batch.
+		Flush struct {
+			// The best-effort number of bytes needed to trigger a flush. Use the
+			// global sarama.MaxRequestSize to set a hard upper limit.
+			Bytes int
+			// The best-effort number of messages needed to trigger a flush. Use
+			// `MaxMessages` to set a hard upper limit.
+			Messages int
+			// The best-effort frequency of flushes. Equivalent to
+			// `queue.buffering.max.ms` setting of JVM producer.
+			Frequency time.Duration
+			// The maximum number of messages the producer will send in a single
+			// broker request. Defaults to 0 for unlimited. Similar to
+			// `queue.buffering.max.messages` in the JVM producer.
+			MaxMessages int
+		}
+
+		Retry struct {
+			// The total number of times to retry sending a message (default 3).
+			// Similar to the `message.send.max.retries` setting of the JVM producer.
+			Max int
+			// How long to wait for the cluster to settle between retries
+			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+			// JVM producer.
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries, maxRetries int) time.Duration
+			// The maximum length of the bridging buffer between `input` and `retries` channels
+			// in AsyncProducer#retryHandler.
+			// The limit is to prevent this buffer from overflowing or causing OOM.
+			// Defaults to 0, which means unlimited.
+			// Any positive value below 4096 is raised to 4096;
+			// a zero or negative value means unlimited.
+			MaxBufferLength int
+			// The maximum total byte size of messages in the bridging buffer between `input`
+			// and `retries` channels in AsyncProducer#retryHandler.
+			// This limit prevents the buffer from consuming excessive memory.
+			// Defaults to 0, which means unlimited.
+			// Any positive value below 32 MB is raised to 32 MB;
+			// a zero or negative value means unlimited.
+			MaxBufferBytes int64
+		}
+
+		// Interceptors to be called when the producer dispatcher reads the
+		// message for the first time. Interceptors allow intercepting and
+		// possibly mutating the message before it is published to the Kafka
+		// cluster. The *ProducerMessage modified by the first interceptor's
+		// OnSend() is passed to the second interceptor's OnSend(), and so on in
+		// the interceptor chain.
+		Interceptors []ProducerInterceptor
+	}
+
+	// Consumer is the namespace for configuration related to consuming messages,
+	// used by the Consumer.
+	Consumer struct {
+		// Group is the namespace for configuring consumer group.
+		Group struct {
+			Session struct {
+				// The timeout used to detect consumer failures when using Kafka's group management facility.
+				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
+				// If no heartbeats are received by the broker before the expiration of this session timeout,
+				// then the broker will remove this consumer from the group and initiate a rebalance.
+				// Note that the value must be in the allowable range as configured in the broker configuration
+				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
+				Timeout time.Duration
+			}
+			Heartbeat struct {
+				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
+				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
+				// to facilitate rebalancing when new consumers join or leave the group.
+				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
+				// higher than 1/3 of that value.
+				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
+				Interval time.Duration
+			}
+			Rebalance struct {
+				// Strategy for allocating topic partitions to members.
+				// Deprecated: Strategy exists for historical compatibility
+				// and should not be used. Please use GroupStrategies.
+				Strategy BalanceStrategy
+
+				// GroupStrategies is the priority-ordered list of client-side consumer group
+				// balancing strategies that will be offered to the coordinator. The first
+				// strategy that all group members support will be chosen by the leader.
+				// default: [ NewBalanceStrategyRange() ]
+				GroupStrategies []BalanceStrategy
+
+				// The maximum allowed time for each worker to join the group once a rebalance has begun.
+				// This is basically a limit on the amount of time needed for all tasks to flush any pending
+				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
+				// the group, which will cause offset commit failures (default 60s).
+				Timeout time.Duration
+
+				Retry struct {
+					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
+					// the load to assign partitions to each consumer. If the set of consumers changes while
+					// this assignment is taking place the rebalance will fail and retry. This setting controls
+					// the maximum number of attempts before giving up (default 4).
+					Max int
+					// Backoff time between retries during rebalance (default 2s)
+					Backoff time.Duration
+				}
+			}
+			Member struct {
+				// Custom metadata to include when joining the group. The user data for all joined members
+				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
+				// coordinator for the group.
+				UserData []byte
+			}
+
+			// support KIP-345
+			InstanceId string
+
+			// If true, consumer offsets will be automatically reset to configured Initial value
+			// if the fetched consumer offset is out of range of available offsets. Out of range
+			// can happen if the data has been deleted from the server, or during situations of
+			// under-replication where a replica does not have all the data yet. It can be
+			// dangerous to reset the offset automatically, particularly in the latter case. Defaults
+			// to true to maintain existing behavior.
+			ResetInvalidOffsets bool
+		}
+
+		Retry struct {
+			// How long to wait after failing to read from a partition before
+			// trying again (default 2s).
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries int) time.Duration
+		}
+
+		// Fetch is the namespace for controlling how many bytes are retrieved by any
+		// given request.
+		Fetch struct {
+			// The minimum number of message bytes to fetch in a request - the broker
+			// will wait until at least this many are available. The default is 1,
+			// as 0 causes the consumer to spin when no messages are available.
+			// Equivalent to the JVM's `fetch.min.bytes`.
+			Min int32
+			// The default number of message bytes to fetch from the broker in each
+			// request (default 1MB). This should be larger than the majority of
+			// your messages, or else the consumer will spend a lot of time
+			// negotiating sizes and not actually consuming. Similar to the JVM's
+			// `fetch.message.max.bytes`.
+			Default int32
+			// The maximum number of message bytes to fetch from the broker in a
+			// single request. Messages larger than this will return
+			// ErrMessageTooLarge and will not be consumable, so you must be sure
+			// this is at least as large as your largest message. Defaults to 0
+			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+			// global `sarama.MaxResponseSize` still applies.
+			Max int32
+		}
+		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
+		// bytes to become available before it returns fewer than that anyway. The
+		// default is 500ms, since 0 causes the consumer to spin when no events are
+		// available. 100-500ms is a reasonable range for most cases. Kafka only
+		// supports precision up to milliseconds; nanoseconds will be truncated.
+		// Equivalent to the JVM's `fetch.max.wait.ms`.
+		MaxWaitTime time.Duration
+
+		// The maximum amount of time the consumer expects a message takes to
+		// process for the user. If writing to the Messages channel takes longer
+		// than this, that partition will stop fetching more messages until it
+		// can proceed again.
+		// Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		// If a message is not written to the Messages channel between two ticks
+		// of the expiryTicker then a timeout is detected.
+		// Using a ticker instead of a timer to detect timeouts should typically
+		// result in many fewer calls to Timer functions which may result in a
+		// significant performance improvement if many messages are being sent
+		// and timeouts are infrequent.
+		// The disadvantage of using a ticker instead of a timer is that
+		// timeouts will be less accurate. That is, the effective timeout could
+		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+		// between two messages being sent may not be recognized as a timeout.
+		MaxProcessingTime time.Duration
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from them to prevent deadlock.
+		Return struct {
+			// If enabled, any errors that occurred while consuming are returned on
+			// the Errors channel (default disabled).
+			Errors bool
+		}
+
+		// Offsets specifies configuration for how and when to commit consumed
+		// offsets. This currently requires the manual use of an OffsetManager
+		// but will eventually be automated.
+		Offsets struct {
+			// Deprecated: CommitInterval exists for historical compatibility
+			// and should not be used. Please use Consumer.Offsets.AutoCommit
+			CommitInterval time.Duration
+
+			// AutoCommit specifies configuration for commit messages automatically.
+			AutoCommit struct {
+				// Whether or not to auto-commit updated offsets back to the broker.
+				// (default enabled).
+				Enable bool
+
+				// How frequently to commit updated offsets. Ineffective unless
+				// auto-commit is enabled (default 1s)
+				Interval time.Duration
+			}
+
+			// The initial offset to use if no offset was previously committed.
+			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+			Initial int64
+
+			// The retention duration for committed offsets. If zero, disabled
+			// (in which case the `offsets.retention.minutes` option on the
+			// broker will be used).  Kafka only supports precision up to
+			// milliseconds; nanoseconds will be truncated. Requires Kafka
+			// broker version 0.9.0 or later.
+			// (default is 0: disabled).
+			Retention time.Duration
+
+			Retry struct {
+				// The total number of times to retry failing commit
+				// requests during OffsetManager shutdown (default 3).
+				Max int
+			}
+		}
+
+		// IsolationLevel supports 2 modes:
+		// 	- use `ReadUncommitted` (default) to consume and return all messages in the message channel
+		//	- use `ReadCommitted` to hide messages that are part of an aborted transaction
+		IsolationLevel IsolationLevel
+
+		// Interceptors to be called just before the record is sent to the
+		// messages channel. Interceptors allow intercepting and possibly
+		// mutating the message before it is returned to the client.
+		// The *ConsumerMessage modified by the first interceptor's OnConsume() is
+		// passed to the second interceptor's OnConsume(), and so on in the
+		// interceptor chain.
+		Interceptors []ConsumerInterceptor
+	}
+
+	// A user-provided string sent with every request to the brokers for logging,
+	// debugging, and auditing purposes. Defaults to "sarama", but you should
+	// probably set it to something specific to your application.
+	ClientID string
+	// A rack identifier for this client. This can be any string value which
+	// indicates where this client is physically located.
+	// It corresponds with the broker config 'broker.rack'
+	RackID string
+	// The number of events to buffer in internal and external channels. This
+	// permits the producer and consumer to continue processing some messages
+	// in the background while user code is working, greatly improving throughput.
+	// Defaults to 256.
+	ChannelBufferSize int
+	// ApiVersionsRequest determines whether Sarama should send an
+	// ApiVersionsRequest message to each broker as part of its initial
+	// connection. This defaults to `true` to match the official Java client
+	// and most third-party ones.
+	ApiVersionsRequest bool
+	// The version of Kafka that Sarama will assume it is running against.
+	// Defaults to the oldest supported stable version. Since Kafka provides
+	// backwards-compatibility, setting it to a version older than you have
+	// will not break anything, although it may prevent you from using the
+	// latest features. Setting it to a version greater than you are actually
+	// running may lead to random breakage.
+	Version KafkaVersion
+	// The registry to define metrics into.
+	// Defaults to a local registry.
+	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+	// prior to starting Sarama.
+	// See Examples on how to use the metrics registry
+	MetricRegistry metrics.Registry
+}
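+
+// Illustrative sketch (not part of the upstream code): the Interceptors field
+// above references an OnConsume() hook; a minimal interceptor that tags every
+// message with an extra header before it reaches the Messages channel could,
+// assuming the package's ConsumerInterceptor interface, look roughly like:
+//
+//	type headerTagger struct{}
+//
+//	func (headerTagger) OnConsume(msg *ConsumerMessage) {
+//		msg.Headers = append(msg.Headers, &RecordHeader{
+//			Key:   []byte("seen-by"),
+//			Value: []byte("my-service"), // hypothetical service name
+//		})
+//	}
+//
+//	// cfg.Consumer.Interceptors = []ConsumerInterceptor{headerTagger{}}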
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+	c := &Config{}
+
+	c.Admin.Retry.Max = 5
+	c.Admin.Retry.Backoff = 100 * time.Millisecond
+	c.Admin.Timeout = 3 * time.Second
+
+	c.Net.MaxOpenRequests = 5
+	c.Net.DialTimeout = 30 * time.Second
+	c.Net.ReadTimeout = 30 * time.Second
+	c.Net.WriteTimeout = 30 * time.Second
+	c.Net.SASL.Handshake = true
+	c.Net.SASL.Version = SASLHandshakeV1
+
+	c.Metadata.Retry.Max = 3
+	c.Metadata.Retry.Backoff = 250 * time.Millisecond
+	c.Metadata.RefreshFrequency = 10 * time.Minute
+	c.Metadata.Full = true
+	c.Metadata.AllowAutoTopicCreation = true
+	c.Metadata.SingleFlight = true
+
+	c.Producer.MaxMessageBytes = 1024 * 1024
+	c.Producer.RequiredAcks = WaitForLocal
+	c.Producer.Timeout = 10 * time.Second
+	c.Producer.Partitioner = NewHashPartitioner
+	c.Producer.Retry.Max = 3
+	c.Producer.Retry.Backoff = 100 * time.Millisecond
+	c.Producer.Return.Errors = true
+	c.Producer.CompressionLevel = CompressionLevelDefault
+
+	c.Producer.Transaction.Timeout = 1 * time.Minute
+	c.Producer.Transaction.Retry.Max = 50
+	c.Producer.Transaction.Retry.Backoff = 100 * time.Millisecond
+
+	c.Consumer.Fetch.Min = 1
+	c.Consumer.Fetch.Default = 1024 * 1024
+	c.Consumer.Retry.Backoff = 2 * time.Second
+	c.Consumer.MaxWaitTime = 500 * time.Millisecond
+	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+	c.Consumer.Return.Errors = false
+	c.Consumer.Offsets.AutoCommit.Enable = true
+	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
+	c.Consumer.Offsets.Initial = OffsetNewest
+	c.Consumer.Offsets.Retry.Max = 3
+
+	c.Consumer.Group.Session.Timeout = 10 * time.Second
+	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
+	c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()}
+	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
+	c.Consumer.Group.Rebalance.Retry.Max = 4
+	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
+	c.Consumer.Group.ResetInvalidOffsets = true
+
+	c.ClientID = defaultClientID
+	c.ChannelBufferSize = 256
+	c.ApiVersionsRequest = true
+	c.Version = DefaultVersion
+	c.MetricRegistry = metrics.NewRegistry()
+
+	return c
+}
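+
+// Illustrative sketch (not part of the upstream code): a typical way to build
+// on NewConfig's defaults, assuming brokers running Kafka 2.1.0 or newer and a
+// service named "my-service":
+//
+//	cfg := NewConfig()
+//	cfg.ClientID = "my-service"       // avoid the default "sarama"
+//	cfg.Version = V2_1_0_0            // match, or predate, the broker version
+//	cfg.Consumer.Return.Errors = true // read errors from the Errors() channel
+//	cfg.Consumer.Offsets.Initial = OffsetOldest
+//	if err := cfg.Validate(); err != nil {
+//		panic(err)
+//	}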
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+//
+//nolint:gocyclo // This function's cyclomatic complexity has gone beyond 100
+func (c *Config) Validate() error {
+	// some configuration values should be warned on but not fail completely, do those first
+	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+	}
+	if !c.Net.SASL.Enable {
+		if c.Net.SASL.User != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+		}
+		if c.Net.SASL.Password != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+		}
+	}
+	if c.Producer.RequiredAcks > 1 {
+		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+	}
+	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+	}
+	if c.Producer.Timeout%time.Millisecond != 0 {
+		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+	}
+	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+	}
+	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.ClientID == defaultClientID {
+		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+	}
+
+	// validate Net values
+	switch {
+	case c.Net.MaxOpenRequests <= 0:
+		return ConfigurationError("Net.MaxOpenRequests must be > 0")
+	case c.Net.DialTimeout <= 0:
+		return ConfigurationError("Net.DialTimeout must be > 0")
+	case c.Net.ReadTimeout <= 0:
+		return ConfigurationError("Net.ReadTimeout must be > 0")
+	case c.Net.WriteTimeout <= 0:
+		return ConfigurationError("Net.WriteTimeout must be > 0")
+	case c.Net.SASL.Enable:
+		if c.Net.SASL.Mechanism == "" {
+			c.Net.SASL.Mechanism = SASLTypePlaintext
+		}
+		if c.Net.SASL.Version == SASLHandshakeV0 && c.ApiVersionsRequest {
+			return ConfigurationError("ApiVersionsRequest must be disabled when SASL v0 is enabled")
+		}
+		switch c.Net.SASL.Mechanism {
+		case SASLTypePlaintext:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+		case SASLTypeOAuth:
+			if c.Net.SASL.TokenProvider == nil {
+				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
+			}
+		case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
+				return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
+			}
+		case SASLTypeGSSAPI:
+			if c.Net.SASL.GSSAPI.ServiceName == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
+			}
+
+			switch c.Net.SASL.GSSAPI.AuthType {
+			case KRB5_USER_AUTH:
+				if c.Net.SASL.GSSAPI.Password == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
+						"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
+				}
+			case KRB5_KEYTAB_AUTH:
+				if c.Net.SASL.GSSAPI.KeyTabPath == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
+						" and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
+				}
+			case KRB5_CCACHE_AUTH:
+				if c.Net.SASL.GSSAPI.CCachePath == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" +
+						" and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH")
+				}
+			default:
+				return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH")
+			}
+
+			if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Username == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Realm == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
+			}
+		default:
+			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
+				SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
+			return ConfigurationError(msg)
+		}
+	}
+
+	// validate the Admin values
+	switch {
+	case c.Admin.Timeout <= 0:
+		return ConfigurationError("Admin.Timeout must be > 0")
+	}
+
+	// validate the Metadata values
+	switch {
+	case c.Metadata.Retry.Max < 0:
+		return ConfigurationError("Metadata.Retry.Max must be >= 0")
+	case c.Metadata.Retry.Backoff < 0:
+		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+	case c.Metadata.RefreshFrequency < 0:
+		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+	}
+
+	// validate the Producer values
+	switch {
+	case c.Producer.MaxMessageBytes <= 0:
+		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+	case c.Producer.RequiredAcks < -1:
+		return ConfigurationError("Producer.RequiredAcks must be >= -1")
+	case c.Producer.Timeout <= 0:
+		return ConfigurationError("Producer.Timeout must be > 0")
+	case c.Producer.Partitioner == nil:
+		return ConfigurationError("Producer.Partitioner must not be nil")
+	case c.Producer.Flush.Bytes < 0:
+		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+	case c.Producer.Flush.Messages < 0:
+		return ConfigurationError("Producer.Flush.Messages must be >= 0")
+	case c.Producer.Flush.Frequency < 0:
+		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+	case c.Producer.Flush.MaxMessages < 0:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+	case c.Producer.Retry.Max < 0:
+		return ConfigurationError("Producer.Retry.Max must be >= 0")
+	case c.Producer.Retry.Backoff < 0:
+		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+	}
+
+	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+	}
+
+	if c.Producer.Compression == CompressionGZIP {
+		if c.Producer.CompressionLevel != CompressionLevelDefault {
+			if _, err := gzip.NewWriterLevel(io.Discard, c.Producer.CompressionLevel); err != nil {
+				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
+			}
+		}
+	}
+
+	if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
+		return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
+	}
+
+	if c.Producer.Idempotent {
+		if !c.Version.IsAtLeast(V0_11_0_0) {
+			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
+		}
+		if c.Producer.Retry.Max == 0 {
+			return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
+		}
+		if c.Producer.RequiredAcks != WaitForAll {
+			return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
+		}
+		if c.Net.MaxOpenRequests > 1 {
+			return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
+		}
+	}
+
+	if c.Producer.Transaction.ID != "" && !c.Producer.Idempotent {
+		return ConfigurationError("Transactional producer requires Idempotent to be true")
+	}
+
+	// validate the Consumer values
+	switch {
+	case c.Consumer.Fetch.Min <= 0:
+		return ConfigurationError("Consumer.Fetch.Min must be > 0")
+	case c.Consumer.Fetch.Default <= 0:
+		return ConfigurationError("Consumer.Fetch.Default must be > 0")
+	case c.Consumer.Fetch.Max < 0:
+		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+	case c.Consumer.MaxProcessingTime <= 0:
+		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+	case c.Consumer.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
+		return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
+	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+	case c.Consumer.Offsets.Retry.Max < 0:
+		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
+	case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
+		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
+	}
+
+	if c.Consumer.Offsets.CommitInterval != 0 {
+		Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
+			" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
+	}
+	if c.Consumer.Group.Rebalance.Strategy != nil {
+		Logger.Println("Deprecation warning: Consumer.Group.Rebalance.Strategy exists for historical compatibility" +
+			" and should not be used. Please use Consumer.Group.Rebalance.GroupStrategies")
+	}
+
+	// validate IsolationLevel
+	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
+		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
+	}
+
+	// validate the Consumer Group values
+	switch {
+	case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
+		return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
+	case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
+		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
+	case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
+		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
+	case c.Consumer.Group.Rebalance.Strategy == nil && len(c.Consumer.Group.Rebalance.GroupStrategies) == 0:
+		return ConfigurationError("Consumer.Group.Rebalance.GroupStrategies or Consumer.Group.Rebalance.Strategy must not be empty")
+	case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
+		return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
+	case c.Consumer.Group.Rebalance.Retry.Max < 0:
+		return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
+	case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
+	}
+
+	for _, strategy := range c.Consumer.Group.Rebalance.GroupStrategies {
+		if strategy == nil {
+			return ConfigurationError("elements in Consumer.Group.Rebalance.GroupStrategies must not be empty")
+		}
+	}
+
+	if c.Consumer.Group.InstanceId != "" {
+		if !c.Version.IsAtLeast(V2_3_0_0) {
+			return ConfigurationError("Consumer.Group.InstanceId requires Version >= 2.3")
+		}
+		if err := validateGroupInstanceId(c.Consumer.Group.InstanceId); err != nil {
+			return err
+		}
+	}
+
+	// validate misc shared values
+	switch {
+	case c.ChannelBufferSize < 0:
+		return ConfigurationError("ChannelBufferSize must be >= 0")
+	}
+
+	// only validate clientID locally for Kafka versions before KIP-190 was implemented
+	if !c.Version.IsAtLeast(V1_0_0_0) && !validClientID.MatchString(c.ClientID) {
+		return ConfigurationError(fmt.Sprintf("ClientID value %q is not valid for Kafka versions before 1.0.0", c.ClientID))
+	}
+
+	return nil
+}
+
+func (c *Config) getDialer() proxy.Dialer {
+	if c.Net.Proxy.Enable {
+		Logger.Println("using proxy")
+		return c.Net.Proxy.Dialer
+	} else {
+		return &net.Dialer{
+			Timeout:   c.Net.DialTimeout,
+			KeepAlive: c.Net.KeepAlive,
+			LocalAddr: c.Net.LocalAddr,
+		}
+	}
+}
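+
+// Illustrative sketch (not part of the upstream code): to route broker
+// connections through a SOCKS5 proxy, the Net.Proxy fields used above could be
+// populated with a dialer from golang.org/x/net/proxy, assuming a proxy is
+// listening on 127.0.0.1:1080:
+//
+//	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
+//	if err != nil {
+//		panic(err)
+//	}
+//	cfg := NewConfig()
+//	cfg.Net.Proxy.Enable = true
+//	cfg.Net.Proxy.Dialer = dialer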
+
+const MAX_GROUP_INSTANCE_ID_LENGTH = 249
+
+var GROUP_INSTANCE_ID_REGEXP = regexp.MustCompile(`^[0-9a-zA-Z\._\-]+$`)
+
+func validateGroupInstanceId(id string) error {
+	if id == "" {
+		return ConfigurationError("Group instance id must be a non-empty string")
+	}
+	if id == "." || id == ".." {
+		return ConfigurationError(`Group instance id cannot be "." or ".."`)
+	}
+	if len(id) > MAX_GROUP_INSTANCE_ID_LENGTH {
+		return ConfigurationError(fmt.Sprintf(`Group instance id cannot be longer than %v characters: %s`, MAX_GROUP_INSTANCE_ID_LENGTH, id))
+	}
+	if !GROUP_INSTANCE_ID_REGEXP.MatchString(id) {
+		return ConfigurationError(fmt.Sprintf(`Group instance id %s is illegal, it contains a character other than ASCII alphanumerics, '.', '_' and '-'`, id))
+	}
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/IBM/sarama/config_resource_type.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/config_resource_type.go
rename to vendor/github.com/IBM/sarama/config_resource_type.go
diff --git a/vendor/github.com/IBM/sarama/consumer.go b/vendor/github.com/IBM/sarama/consumer.go
new file mode 100644
index 0000000..4c96af4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer.go
@@ -0,0 +1,1136 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+	Headers        []*RecordHeader // only set if kafka is version 0.11+
+	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
+	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
+
+	Key, Value []byte
+	Topic      string
+	Partition  int32
+	Offset     int64
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+	Topic     string
+	Partition int32
+	Err       error
+}
+
+func (ce ConsumerError) Error() string {
+	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+func (ce ConsumerError) Unwrap() error {
+	return ce.Err
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+type Consumer interface {
+	// Topics returns the set of available topics as retrieved from the cluster
+	// metadata. This method is the same as Client.Topics(), and is provided for
+	// convenience.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	// This method is the same as Client.Partitions(), and is provided for convenience.
+	Partitions(topic string) ([]int32, error)
+
+	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
+	// the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+	// or OffsetOldest.
+	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+	// HighWaterMarks returns the current high water marks for each topic and partition.
+	// Consistency between partitions is not guaranteed since high water marks are updated separately.
+	HighWaterMarks() map[string]map[int32]int64
+
+	// Close shuts down the consumer. It must be called after all child
+	// PartitionConsumers have already been closed.
+	Close() error
+
+	// Pause suspends fetching from the requested partitions. Future calls to the broker will not return any
+	// records from these partitions until they have been resumed using Resume()/ResumeAll().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	Pause(topicPartitions map[string][]int32)
+
+	// Resume resumes specified partitions which have been paused with Pause()/PauseAll().
+	// New calls to the broker will return records from these partitions if there are any to be fetched.
+	Resume(topicPartitions map[string][]int32)
+
+	// PauseAll suspends fetching from all partitions. Future calls to the broker will not return any
+	// records from these partitions until they have been resumed using Resume()/ResumeAll().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	PauseAll()
+
+	// ResumeAll resumes all partitions which have been paused with Pause()/PauseAll().
+	// New calls to the broker will return records from these partitions if there are any to be fetched.
+	ResumeAll()
+}
+
+// max time to wait for more partition subscriptions
+const partitionConsumersBatchTimeout = 100 * time.Millisecond
+
+type consumer struct {
+	conf            *Config
+	children        map[string]map[int32]*partitionConsumer
+	brokerConsumers map[*Broker]*brokerConsumer
+	client          Client
+	metricRegistry  metrics.Registry
+	lock            sync.Mutex
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newConsumer(client)
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+	// For clients passed in by the caller, ensure we don't
+	// call Close() on them.
+	cli := &nopCloserClient{client}
+	return newConsumer(cli)
+}
+
+func newConsumer(client Client) (Consumer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	c := &consumer{
+		client:          client,
+		conf:            client.Config(),
+		children:        make(map[string]map[int32]*partitionConsumer),
+		brokerConsumers: make(map[*Broker]*brokerConsumer),
+		metricRegistry:  newCleanupRegistry(client.Config().MetricRegistry),
+	}
+
+	return c, nil
+}
+
+func (c *consumer) Close() error {
+	c.metricRegistry.UnregisterAll()
+	return c.client.Close()
+}
+
+func (c *consumer) Topics() ([]string, error) {
+	return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+	return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+	child := &partitionConsumer{
+		consumer:             c,
+		conf:                 c.conf,
+		topic:                topic,
+		partition:            partition,
+		messages:             make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+		errors:               make(chan *ConsumerError, c.conf.ChannelBufferSize),
+		feeder:               make(chan *FetchResponse, 1),
+		leaderEpoch:          invalidLeaderEpoch,
+		preferredReadReplica: invalidPreferredReplicaID,
+		trigger:              make(chan none, 1),
+		dying:                make(chan none),
+		fetchSize:            c.conf.Consumer.Fetch.Default,
+	}
+
+	if err := child.chooseStartingOffset(offset); err != nil {
+		return nil, err
+	}
+
+	leader, epoch, err := c.client.LeaderAndEpoch(child.topic, child.partition)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := c.addChild(child); err != nil {
+		return nil, err
+	}
+
+	go withRecover(child.dispatcher)
+	go withRecover(child.responseFeeder)
+
+	child.leaderEpoch = epoch
+	child.broker = c.refBrokerConsumer(leader)
+	child.broker.input <- child
+
+	return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	hwms := make(map[string]map[int32]int64)
+	for topic, p := range c.children {
+		hwm := make(map[int32]int64, len(p))
+		for partition, pc := range p {
+			hwm[partition] = pc.HighWaterMarkOffset()
+		}
+		hwms[topic] = hwm
+	}
+
+	return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	topicChildren := c.children[child.topic]
+	if topicChildren == nil {
+		topicChildren = make(map[int32]*partitionConsumer)
+		c.children[child.topic] = topicChildren
+	}
+
+	if topicChildren[child.partition] != nil {
+		return ConfigurationError("That topic/partition is already being consumed")
+	}
+
+	topicChildren[child.partition] = child
+	return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	bc := c.brokerConsumers[broker]
+	if bc == nil {
+		bc = c.newBrokerConsumer(broker)
+		c.brokerConsumers[broker] = bc
+	}
+
+	bc.refs++
+
+	return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	brokerWorker.refs--
+
+	if brokerWorker.refs == 0 {
+		close(brokerWorker.input)
+		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+			delete(c.brokerConsumers, brokerWorker.broker)
+		}
+	}
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// Pause implements Consumer.
+func (c *consumer) Pause(topicPartitions map[string][]int32) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	for topic, partitions := range topicPartitions {
+		for _, partition := range partitions {
+			if topicConsumers, ok := c.children[topic]; ok {
+				if partitionConsumer, ok := topicConsumers[partition]; ok {
+					partitionConsumer.Pause()
+				}
+			}
+		}
+	}
+}
+
+// Resume implements Consumer.
+func (c *consumer) Resume(topicPartitions map[string][]int32) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	for topic, partitions := range topicPartitions {
+		for _, partition := range partitions {
+			if topicConsumers, ok := c.children[topic]; ok {
+				if partitionConsumer, ok := topicConsumers[partition]; ok {
+					partitionConsumer.Resume()
+				}
+			}
+		}
+	}
+}
+
+// PauseAll implements Consumer.
+func (c *consumer) PauseAll() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	for _, partitions := range c.children {
+		for _, partitionConsumer := range partitions {
+			partitionConsumer.Pause()
+		}
+	}
+}
+
+// ResumeAll implements Consumer.
+func (c *consumer) ResumeAll() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	for _, partitions := range c.children {
+		for _, partitionConsumer := range partitions {
+			partitionConsumer.Resume()
+		}
+	}
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
+type PartitionConsumer interface {
+	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	// function (or Close) before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	AsyncClose()
+
+	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+	// the Messages channel when this function is called, you will be competing with Close for messages; consider
+	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
+	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+	Close() error
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker.
+	Messages() <-chan *ConsumerMessage
+
+	// Errors returns a read channel of errors that occurred during consuming, if
+	// enabled. By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+
+	// Pause suspends fetching from this partition. Future calls to the broker will not return
+	// any records from this partition until it has been resumed using Resume().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	Pause()
+
+	// Resume resumes this partition, which has been paused with Pause().
+	// New calls to the broker will return records from this partition if there are any to be fetched.
+	// If the partition was not previously paused, this method is a no-op.
+	Resume()
+
+	// IsPaused indicates if this partition consumer is paused or not
+	IsPaused() bool
+}
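+
+// Illustrative sketch (not part of the upstream code): the for/range pattern
+// described above, assuming a broker at "localhost:9092" and a topic named
+// "my-topic"; the loop ends once AsyncClose or Close is called elsewhere:
+//
+//	consumer, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	for msg := range pc.Messages() {
+//		fmt.Printf("offset %d: %s\n", msg.Offset, string(msg.Value))
+//	}
+//	_ = consumer.Close()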
+
+type partitionConsumer struct {
+	highWaterMarkOffset atomic.Int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
+	consumer *consumer
+	conf     *Config
+	broker   *brokerConsumer
+	messages chan *ConsumerMessage
+	errors   chan *ConsumerError
+	feeder   chan *FetchResponse
+
+	leaderEpoch          int32
+	preferredReadReplica int32
+
+	trigger, dying chan none
+	closeOnce      sync.Once
+	topic          string
+	partition      int32
+	responseResult error
+	fetchSize      int32
+	offset         int64
+	retries        atomic.Int32
+
+	paused atomic.Bool // true when this partition consumer is paused
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+	cErr := &ConsumerError{
+		Topic:     child.topic,
+		Partition: child.partition,
+		Err:       err,
+	}
+
+	if child.conf.Consumer.Return.Errors {
+		child.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (child *partitionConsumer) computeBackoff() time.Duration {
+	if child.conf.Consumer.Retry.BackoffFunc != nil {
+		retries := child.retries.Add(1)
+		return child.conf.Consumer.Retry.BackoffFunc(int(retries))
+	}
+	return child.conf.Consumer.Retry.Backoff
+}
+
+func (child *partitionConsumer) dispatcher() {
+	for range child.trigger {
+		select {
+		case <-child.dying:
+			close(child.trigger)
+		case <-time.After(child.computeBackoff()):
+			if child.broker != nil {
+				child.consumer.unrefBrokerConsumer(child.broker)
+				child.broker = nil
+			}
+
+			if err := child.dispatch(); err != nil {
+				child.sendError(err)
+				child.trigger <- none{}
+			}
+		}
+	}
+
+	if child.broker != nil {
+		child.consumer.unrefBrokerConsumer(child.broker)
+	}
+	child.consumer.removeChild(child)
+	close(child.feeder)
+}
+
+func (child *partitionConsumer) preferredBroker() (*Broker, int32, error) {
+	if child.preferredReadReplica >= 0 {
+		broker, err := child.consumer.client.Broker(child.preferredReadReplica)
+		if err == nil {
+			return broker, child.leaderEpoch, nil
+		}
+		Logger.Printf(
+			"consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader",
+			child.topic, child.partition, child.preferredReadReplica)
+
+		// if we couldn't find it, discard the replica preference and trigger a
+		// metadata refresh whilst falling back to consuming from the leader again
+		child.preferredReadReplica = invalidPreferredReplicaID
+		_ = child.consumer.client.RefreshMetadata(child.topic)
+	}
+
+	// if the preferred replica cannot be found, fall back to the leader
+	return child.consumer.client.LeaderAndEpoch(child.topic, child.partition)
+}
+
+func (child *partitionConsumer) dispatch() error {
+	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+		return err
+	}
+
+	broker, epoch, err := child.preferredBroker()
+	if err != nil {
+		return err
+	}
+
+	child.leaderEpoch = epoch
+	child.broker = child.consumer.refBrokerConsumer(broker)
+	child.broker.input <- child
+
+	return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+	newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+	if err != nil {
+		return err
+	}
+
+	child.highWaterMarkOffset.Store(newestOffset)
+
+	oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case offset == OffsetNewest:
+		child.offset = newestOffset
+	case offset == OffsetOldest:
+		child.offset = oldestOffset
+	case offset >= oldestOffset && offset <= newestOffset:
+		child.offset = offset
+	default:
+		return ErrOffsetOutOfRange
+	}
+
+	return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+	return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+	return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
+	// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
+	// also just close itself)
+	child.closeOnce.Do(func() {
+		close(child.dying)
+	})
+}
+
+func (child *partitionConsumer) Close() error {
+	child.AsyncClose()
+
+	var consumerErrors ConsumerErrors
+	for err := range child.errors {
+		consumerErrors = append(consumerErrors, err)
+	}
+
+	if len(consumerErrors) > 0 {
+		return consumerErrors
+	}
+	return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+	return child.highWaterMarkOffset.Load()
+}
+
+func (child *partitionConsumer) responseFeeder() {
+	var msgs []*ConsumerMessage
+	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
+	firstAttempt := true
+
+feederLoop:
+	for response := range child.feeder {
+		msgs, child.responseResult = child.parseResponse(response)
+
+		if child.responseResult == nil {
+			child.retries.Store(0)
+		}
+
+		for i, msg := range msgs {
+			child.interceptors(msg)
+		messageSelect:
+			select {
+			case <-child.dying:
+				child.broker.acks.Done()
+				continue feederLoop
+			case child.messages <- msg:
+				firstAttempt = true
+			case <-expiryTicker.C:
+				if !firstAttempt {
+					child.responseResult = errTimedOut
+					child.broker.acks.Done()
+				remainingLoop:
+					for _, msg = range msgs[i:] {
+						child.interceptors(msg)
+						select {
+						case child.messages <- msg:
+						case <-child.dying:
+							break remainingLoop
+						}
+					}
+					child.broker.input <- child
+					continue feederLoop
+				} else {
+					// current message has not been sent, return to select
+					// statement
+					firstAttempt = false
+					goto messageSelect
+				}
+			}
+		}
+
+		child.broker.acks.Done()
+	}
+
+	expiryTicker.Stop()
+	close(child.messages)
+	close(child.errors)
+}
+
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
+	var messages []*ConsumerMessage
+	for _, msgBlock := range msgSet.Messages {
+		for _, msg := range msgBlock.Messages() {
+			offset := msg.Offset
+			timestamp := msg.Msg.Timestamp
+			if msg.Msg.Version >= 1 {
+				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+				offset += baseOffset
+				if msg.Msg.LogAppendTime {
+					timestamp = msgBlock.Msg.Timestamp
+				}
+			}
+			if offset < child.offset {
+				continue
+			}
+			messages = append(messages, &ConsumerMessage{
+				Topic:          child.topic,
+				Partition:      child.partition,
+				Key:            msg.Msg.Key,
+				Value:          msg.Msg.Value,
+				Offset:         offset,
+				Timestamp:      timestamp,
+				BlockTimestamp: msgBlock.Msg.Timestamp,
+			})
+			child.offset = offset + 1
+		}
+	}
+	if len(messages) == 0 {
+		child.offset++
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+	messages := make([]*ConsumerMessage, 0, len(batch.Records))
+
+	for _, rec := range batch.Records {
+		offset := batch.FirstOffset + rec.OffsetDelta
+		if offset < child.offset {
+			continue
+		}
+		timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
+		if batch.LogAppendTime {
+			timestamp = batch.MaxTimestamp
+		}
+		messages = append(messages, &ConsumerMessage{
+			Topic:     child.topic,
+			Partition: child.partition,
+			Key:       rec.Key,
+			Value:     rec.Value,
+			Offset:    offset,
+			Timestamp: timestamp,
+			Headers:   rec.Headers,
+		})
+		child.offset = offset + 1
+	}
+	if len(messages) == 0 {
+		child.offset++
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+	var consumerBatchSizeMetric metrics.Histogram
+	if child.consumer != nil && child.consumer.metricRegistry != nil {
+		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", child.consumer.metricRegistry)
+	}
+
+	// If the request was throttled and the response is empty, log it and return without error
+	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
+		Logger.Printf(
+			"consumer/broker/%d FetchResponse throttled %v\n",
+			child.broker.broker.ID(), response.ThrottleTime)
+		return nil, nil
+	}
+
+	block := response.GetBlock(child.topic, child.partition)
+	if block == nil {
+		return nil, ErrIncompleteResponse
+	}
+
+	if !errors.Is(block.Err, ErrNoError) {
+		return nil, block.Err
+	}
+
+	nRecs, err := block.numRecords()
+	if err != nil {
+		return nil, err
+	}
+
+	if consumerBatchSizeMetric != nil {
+		consumerBatchSizeMetric.Update(int64(nRecs))
+	}
+
+	if block.PreferredReadReplica != invalidPreferredReplicaID {
+		child.preferredReadReplica = block.PreferredReadReplica
+	}
+
+	if nRecs == 0 {
+		partialTrailingMessage, err := block.isPartial()
+		if err != nil {
+			return nil, err
+		}
+		// We got no messages. If we got a trailing one then we need to ask for more data.
+		// Otherwise we just poll again and wait for one to be produced...
+		if partialTrailingMessage {
+			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+				// we can't ask for more data, we've hit the configured limit
+				child.sendError(ErrMessageTooLarge)
+				child.offset++ // skip this one so we can keep processing future messages
+			} else {
+				child.fetchSize *= 2
+				// check int32 overflow
+				if child.fetchSize < 0 {
+					child.fetchSize = math.MaxInt32
+				}
+				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+					child.fetchSize = child.conf.Consumer.Fetch.Max
+				}
+			}
+		} else if block.recordsNextOffset != nil && *block.recordsNextOffset <= block.HighWaterMarkOffset {
+			// check last record next offset to avoid stuck if high watermark was not reached
+			Logger.Printf("consumer/broker/%d received batch with zero records but high watermark was not reached, topic %s, partition %d, next offset %d\n", child.broker.broker.ID(), child.topic, child.partition, *block.recordsNextOffset)
+			child.offset = *block.recordsNextOffset
+		}
+
+		return nil, nil
+	}
+
+	// we got messages, reset our fetch size in case it was increased for a previous request
+	child.fetchSize = child.conf.Consumer.Fetch.Default
+	child.highWaterMarkOffset.Store(block.HighWaterMarkOffset)
+
+	// abortedProducerIDs contains producer IDs whose messages should be ignored as uncommitted
+	// - a producer ID is added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
+	// - a producer ID is removed when the partitionConsumer iterates over an abort control record, meaning the aborted transaction for this producer is over
+	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
+	abortedTransactions := block.getAbortedTransactions()
+
+	var messages []*ConsumerMessage
+	for _, records := range block.RecordsSet {
+		switch records.recordsType {
+		case legacyRecords:
+			messageSetMessages, err := child.parseMessages(records.MsgSet)
+			if err != nil {
+				return nil, err
+			}
+
+			messages = append(messages, messageSetMessages...)
+		case defaultRecords:
+			// Consume remaining abortedTransaction up to last offset of current batch
+			for _, txn := range abortedTransactions {
+				if txn.FirstOffset > records.RecordBatch.LastOffset() {
+					break
+				}
+				abortedProducerIDs[txn.ProducerID] = struct{}{}
+				// Pop abortedTransactions so that we never add it again
+				abortedTransactions = abortedTransactions[1:]
+			}
+
+			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
+			if err != nil {
+				return nil, err
+			}
+
+			// Parse and commit offset but do not expose messages that are:
+			// - control records
+			// - part of an aborted transaction when set to `ReadCommitted`
+
+			// control record
+			isControl, err := records.isControl()
+			if err != nil {
+				// I don't know why there is this continue in case of error to begin with
+				// Safe bet is to ignore control messages if ReadUncommitted
+				// and block on them in case of error and ReadCommitted
+				if child.conf.Consumer.IsolationLevel == ReadCommitted {
+					return nil, err
+				}
+				continue
+			}
+			if isControl {
+				controlRecord, err := records.getControlRecord()
+				if err != nil {
+					return nil, err
+				}
+
+				if controlRecord.Type == ControlRecordAbort {
+					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
+				}
+				continue
+			}
+
+			// filter aborted transactions
+			if child.conf.Consumer.IsolationLevel == ReadCommitted {
+				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
+				if records.RecordBatch.IsTransactional && isAborted {
+					continue
+				}
+			}
+
+			messages = append(messages, recordBatchMessages...)
+		default:
+			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
+		}
+	}
+
+	return messages, nil
+}
+
+func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
+	for _, interceptor := range child.conf.Consumer.Interceptors {
+		msg.safelyApplyInterceptor(interceptor)
+	}
+}
+
+// Pause implements PartitionConsumer.
+func (child *partitionConsumer) Pause() {
+	child.paused.Store(true)
+}
+
+// Resume implements PartitionConsumer.
+func (child *partitionConsumer) Resume() {
+	child.paused.Store(false)
+}
+
+// IsPaused implements PartitionConsumer.
+func (child *partitionConsumer) IsPaused() bool {
+	return child.paused.Load()
+}
+
+type brokerConsumer struct {
+	consumer         *consumer
+	broker           *Broker
+	input            chan *partitionConsumer
+	newSubscriptions chan []*partitionConsumer
+	subscriptions    map[*partitionConsumer]none
+	acks             sync.WaitGroup
+	refs             int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+	bc := &brokerConsumer{
+		consumer:         c,
+		broker:           broker,
+		input:            make(chan *partitionConsumer),
+		newSubscriptions: make(chan []*partitionConsumer),
+		subscriptions:    make(map[*partitionConsumer]none),
+		refs:             0,
+	}
+
+	go withRecover(bc.subscriptionManager)
+	go withRecover(bc.subscriptionConsumer)
+
+	return bc
+}
+
+// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+// it nil if no new subscriptions are available.
+func (bc *brokerConsumer) subscriptionManager() {
+	defer close(bc.newSubscriptions)
+
+	for {
+		var partitionConsumers []*partitionConsumer
+
+		// Check for any partition consumer asking to subscribe; if there aren't
+		// any, trigger the network request (to fetch Kafka messages) by sending "nil" to the
+		// newSubscriptions channel
+		select {
+		case pc, ok := <-bc.input:
+			if !ok {
+				return
+			}
+			partitionConsumers = append(partitionConsumers, pc)
+		case bc.newSubscriptions <- nil:
+			continue
+		}
+
+		// drain input of any further incoming subscriptions
+		timer := time.NewTimer(partitionConsumersBatchTimeout)
+		for batchComplete := false; !batchComplete; {
+			select {
+			case pc := <-bc.input:
+				partitionConsumers = append(partitionConsumers, pc)
+			case <-timer.C:
+				batchComplete = true
+			}
+		}
+		timer.Stop()
+
+		Logger.Printf(
+			"consumer/broker/%d accumulated %d new subscriptions\n",
+			bc.broker.ID(), len(partitionConsumers))
+
+		bc.newSubscriptions <- partitionConsumers
+	}
+}
+
+// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
+// this is the main loop that fetches Kafka messages
+func (bc *brokerConsumer) subscriptionConsumer() {
+	for newSubscriptions := range bc.newSubscriptions {
+		bc.updateSubscriptions(newSubscriptions)
+
+		if len(bc.subscriptions) == 0 {
+			// We're about to be shut down or we're about to receive more subscriptions.
+			// Take a small nap to avoid burning the CPU.
+			time.Sleep(partitionConsumersBatchTimeout)
+			continue
+		}
+
+		response, err := bc.fetchNewMessages()
+		if err != nil {
+			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+			bc.abort(err)
+			return
+		}
+
+		// if there is no response, no fetch was made,
+		// so there is nothing to handle
+		if response == nil {
+			time.Sleep(partitionConsumersBatchTimeout)
+			continue
+		}
+
+		bc.acks.Add(len(bc.subscriptions))
+		for child := range bc.subscriptions {
+			if _, ok := response.Blocks[child.topic]; !ok {
+				bc.acks.Done()
+				continue
+			}
+
+			if _, ok := response.Blocks[child.topic][child.partition]; !ok {
+				bc.acks.Done()
+				continue
+			}
+
+			child.feeder <- response
+		}
+		bc.acks.Wait()
+		bc.handleResponses()
+	}
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+	for _, child := range newSubscriptions {
+		bc.subscriptions[child] = none{}
+		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+	}
+
+	for child := range bc.subscriptions {
+		select {
+		case <-child.dying:
+			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		default:
+			// no-op
+		}
+	}
+}
+
+// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+func (bc *brokerConsumer) handleResponses() {
+	for child := range bc.subscriptions {
+		result := child.responseResult
+		child.responseResult = nil
+
+		if result == nil {
+			if preferredBroker, _, err := child.preferredBroker(); err == nil {
+				if bc.broker.ID() != preferredBroker.ID() {
+					// not an error but needs redispatching to consume from preferred replica
+					Logger.Printf(
+						"consumer/broker/%d abandoned in favor of preferred replica broker/%d\n",
+						bc.broker.ID(), preferredBroker.ID())
+					child.trigger <- none{}
+					delete(bc.subscriptions, child)
+				}
+			}
+			continue
+		}
+
+		// Discard any replica preference.
+		child.preferredReadReplica = invalidPreferredReplicaID
+
+		if errors.Is(result, errTimedOut) {
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+				bc.broker.ID(), child.topic, child.partition)
+			delete(bc.subscriptions, child)
+		} else if errors.Is(result, ErrOffsetOutOfRange) {
+			// there's no point in retrying this; it will just fail the same way again
+			// shut it down and force the user to choose what to do
+			child.sendError(result)
+			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		} else if errors.Is(result, ErrUnknownTopicOrPartition) ||
+			errors.Is(result, ErrNotLeaderForPartition) ||
+			errors.Is(result, ErrLeaderNotAvailable) ||
+			errors.Is(result, ErrReplicaNotAvailable) ||
+			errors.Is(result, ErrFencedLeaderEpoch) ||
+			errors.Is(result, ErrUnknownLeaderEpoch) {
+			// not an error, but does need redispatching
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		} else {
+			// unknown error; tell the user and try redispatching
+			child.sendError(result)
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		}
+	}
+}
+
+func (bc *brokerConsumer) abort(err error) {
+	bc.consumer.abandonBrokerConsumer(bc)
+	_ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+	for child := range bc.subscriptions {
+		child.sendError(err)
+		child.trigger <- none{}
+	}
+
+	for newSubscriptions := range bc.newSubscriptions {
+		if len(newSubscriptions) == 0 {
+			// Take a small nap to avoid burning the CPU.
+			time.Sleep(partitionConsumersBatchTimeout)
+			continue
+		}
+		for _, child := range newSubscriptions {
+			child.sendError(err)
+			child.trigger <- none{}
+		}
+	}
+}
+
+// fetchNewMessages returns a nil response if no fetch is made, which can occur
+// when all partitions are paused
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+	request := &FetchRequest{
+		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
+		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+	}
+	// Version 1 is the same as version 0.
+	if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
+		request.Version = 1
+	}
+	// Starting in Version 2, the requestor must be able to handle Kafka Log
+	// Message format version 1.
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+		request.Version = 2
+	}
+	// Version 3 adds MaxBytes.  Starting in version 3, the partition ordering in
+	// the request is now relevant.  Partitions will be processed in the order
+	// they appear in the request.
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 3
+		request.MaxBytes = MaxResponseSize
+	}
+	// Version 4 adds IsolationLevel.  Starting in version 4, the requestor must be
+	// able to handle Kafka log message format version 2.
+	// Version 5 adds LogStartOffset to indicate the earliest available offset of
+	// partition data that can be consumed.
+	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 5
+		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
+	}
+	// Version 6 is the same as version 5.
+	if bc.consumer.conf.Version.IsAtLeast(V1_0_0_0) {
+		request.Version = 6
+	}
+	// Version 7 adds incremental fetch request support.
+	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 7
+		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
+		// and the epoch to -1 tells the broker not to generate a session ID we're
+		// just going to ignore anyway.
+		request.SessionID = 0
+		request.SessionEpoch = -1
+	}
+	// Version 8 is the same as version 7.
+	if bc.consumer.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 8
+	}
+	// Version 9 adds CurrentLeaderEpoch, as described in KIP-320.
+	// Version 10 indicates that we can use the ZStd compression algorithm, as
+	// described in KIP-110.
+	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
+		request.Version = 10
+	}
+	// Version 11 adds RackID for KIP-392 fetch from closest replica
+	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
+		request.Version = 11
+		request.RackID = bc.consumer.conf.RackID
+	}
+
+	for child := range bc.subscriptions {
+		if !child.IsPaused() {
+			request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize, child.leaderEpoch)
+		}
+	}
+
+	// avoid fetching when there are no blocks
+	if len(request.blocks) == 0 {
+		return nil, nil
+	}
+
+	return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go
new file mode 100644
index 0000000..f47eac7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_group.go
@@ -0,0 +1,1184 @@
+package sarama
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
+var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
+
+// ConsumerGroup is responsible for dividing up processing of topics and partitions
+// over a collection of processes (the members of the consumer group).
+type ConsumerGroup interface {
+	// Consume joins a cluster of consumers for a given list of topics and
+	// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
+	//
+	// The life-cycle of a session is represented by the following steps:
+	//
+	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
+	//    and are assigned their "fair share" of partitions, aka 'claims'.
+	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
+	//    of the claims and allow any necessary preparation or alteration of state.
+	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
+	//    in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
+	//    from concurrent reads/writes.
+	// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
+	//    parent context is canceled or when a server-side rebalance cycle is initiated.
+	// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
+	//    to allow the user to perform any final tasks before a rebalance.
+	// 6. Finally, marked offsets are committed one last time before claims are released.
+	//
+	// Please note that once a rebalance is triggered, sessions must be completed within
+	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
+	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
+	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
+	// commit failures.
+	// This method should be called inside an infinite loop; when a
+	// server-side rebalance happens, the consumer session needs to be
+	// recreated to get the new claims.
+	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
+
+	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
+	// By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan error
+
+	// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
+	// this function before the object passes out of scope, as it will otherwise leak memory.
+	Close() error
+
+	// Pause suspends fetching from the requested partitions. Future calls to the broker will not return any
+	// records from these partitions until they have been resumed using Resume()/ResumeAll().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	Pause(partitions map[string][]int32)
+
+	// Resume resumes specified partitions which have been paused with Pause()/PauseAll().
+	// New calls to the broker will return records from these partitions if there are any to be fetched.
+	Resume(partitions map[string][]int32)
+
+	// PauseAll suspends fetching from all partitions. Future calls to the broker will not return any
+	// records from these partitions until they have been resumed using Resume()/ResumeAll().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	PauseAll()
+
+	// ResumeAll resumes all partitions which have been paused with Pause()/PauseAll().
+	// New calls to the broker will return records from these partitions if there are any to be fetched.
+	ResumeAll()
+}
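
A minimal usage sketch of the life-cycle documented above; the handler type, broker address, group and topic names are illustrative and not part of this package:

```go
package main

import (
	"context"
	"log"

	"github.com/IBM/sarama"
)

// exampleHandler is a hypothetical ConsumerGroupHandler implementation.
type exampleHandler struct{}

func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// The Messages() channel is closed when a rebalance is due, which ends this loop.
	for msg := range claim.Messages() {
		log.Printf("topic=%s partition=%d offset=%d", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "") // marks msg.Offset+1 as the next offset to read
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_3_0_0

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx := context.Background()
	for {
		// Consume blocks for the lifetime of one session; after a server-side
		// rebalance it returns and must be called again to join the next generation.
		if err := group.Consume(ctx, []string{"example-topic"}, exampleHandler{}); err != nil {
			log.Fatal(err)
		}
		if ctx.Err() != nil {
			return
		}
	}
}
```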
+
+type consumerGroup struct {
+	client Client
+
+	config          *Config
+	consumer        Consumer
+	groupID         string
+	groupInstanceId *string
+	memberID        string
+	errors          chan error
+
+	lock       sync.Mutex
+	errorsLock sync.RWMutex
+	closed     chan none
+	closeOnce  sync.Once
+
+	userData []byte
+
+	metricRegistry metrics.Registry
+}
+
+// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
+func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := newConsumerGroup(groupID, client)
+	if err != nil {
+		_ = client.Close()
+	}
+	return c, err
+}
+
+// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+// PLEASE NOTE: consumer groups can only re-use but not share clients.
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+	if client == nil {
+		return nil, ConfigurationError("client must not be nil")
+	}
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newConsumerGroup(groupID, cli)
+}
+
+func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
+	config := client.Config()
+	if !config.Version.IsAtLeast(V0_10_2_0) {
+		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
+	}
+
+	consumer, err := newConsumer(client)
+	if err != nil {
+		return nil, err
+	}
+
+	cg := &consumerGroup{
+		client:         client,
+		consumer:       consumer,
+		config:         config,
+		groupID:        groupID,
+		errors:         make(chan error, config.ChannelBufferSize),
+		closed:         make(chan none),
+		userData:       config.Consumer.Group.Member.UserData,
+		metricRegistry: newCleanupRegistry(config.MetricRegistry),
+	}
+	if config.Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) {
+		cg.groupInstanceId = &config.Consumer.Group.InstanceId
+	}
+	return cg, nil
+}
+
+// Errors implements ConsumerGroup.
+func (c *consumerGroup) Errors() <-chan error { return c.errors }
+
+// Close implements ConsumerGroup.
+func (c *consumerGroup) Close() (err error) {
+	c.closeOnce.Do(func() {
+		close(c.closed)
+
+		// leave group
+		if e := c.leave(); e != nil {
+			err = e
+		}
+
+		go func() {
+			c.errorsLock.Lock()
+			defer c.errorsLock.Unlock()
+			close(c.errors)
+		}()
+
+		// drain errors
+		for e := range c.errors {
+			err = e
+		}
+
+		if e := c.client.Close(); e != nil {
+			err = e
+		}
+
+		c.metricRegistry.UnregisterAll()
+	})
+	return
+}
+
+// Consume implements ConsumerGroup.
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
+	// Ensure group is not closed
+	select {
+	case <-c.closed:
+		return ErrClosedConsumerGroup
+	default:
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Quick exit when no topics are provided
+	if len(topics) == 0 {
+		return fmt.Errorf("no topics provided")
+	}
+
+	// Refresh metadata for requested topics
+	if err := c.client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	// Init session
+	sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+	if errors.Is(err, ErrClosedClient) {
+		return ErrClosedConsumerGroup
+	} else if err != nil {
+		return err
+	}
+
+	// Wait for session exit signal or Close() call
+	select {
+	case <-c.closed:
+	case <-sess.ctx.Done():
+	}
+
+	// Gracefully release session claims
+	return sess.release(true)
+}
+
+// Pause implements ConsumerGroup.
+func (c *consumerGroup) Pause(partitions map[string][]int32) {
+	c.consumer.Pause(partitions)
+}
+
+// Resume implements ConsumerGroup.
+func (c *consumerGroup) Resume(partitions map[string][]int32) {
+	c.consumer.Resume(partitions)
+}
+
+// PauseAll implements ConsumerGroup.
+func (c *consumerGroup) PauseAll() {
+	c.consumer.PauseAll()
+}
+
+// ResumeAll implements ConsumerGroup.
+func (c *consumerGroup) ResumeAll() {
+	c.consumer.ResumeAll()
+}
+
+func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-c.closed:
+		return nil, ErrClosedConsumerGroup
+	case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+	}
+
+	if refreshCoordinator {
+		err := c.client.RefreshCoordinator(c.groupID)
+		if err != nil {
+			if retries <= 0 {
+				return nil, err
+			}
+			return c.retryNewSession(ctx, topics, handler, retries-1, true)
+		}
+	}
+
+	return c.newSession(ctx, topics, handler, retries-1)
+}
+
+func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		if retries <= 0 {
+			return nil, err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	}
+
+	var (
+		metricRegistry          = c.metricRegistry
+		consumerGroupJoinTotal  metrics.Counter
+		consumerGroupJoinFailed metrics.Counter
+		consumerGroupSyncTotal  metrics.Counter
+		consumerGroupSyncFailed metrics.Counter
+	)
+
+	if metricRegistry != nil {
+		consumerGroupJoinTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-total-%s", c.groupID), metricRegistry)
+		consumerGroupJoinFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-failed-%s", c.groupID), metricRegistry)
+		consumerGroupSyncTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-total-%s", c.groupID), metricRegistry)
+		consumerGroupSyncFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-failed-%s", c.groupID), metricRegistry)
+	}
+
+	// Join consumer group
+	join, err := c.joinGroupRequest(coordinator, topics)
+	if consumerGroupJoinTotal != nil {
+		consumerGroupJoinTotal.Inc(1)
+	}
+	if err != nil {
+		_ = coordinator.Close()
+		if consumerGroupJoinFailed != nil {
+			consumerGroupJoinFailed.Inc(1)
+		}
+		return nil, err
+	}
+	if !errors.Is(join.Err, ErrNoError) {
+		if consumerGroupJoinFailed != nil {
+			consumerGroupJoinFailed.Inc(1)
+		}
+	}
+	switch join.Err {
+	case ErrNoError:
+		c.memberID = join.MemberId
+	case ErrUnknownMemberId, ErrIllegalGeneration:
+		// reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress:
+		// retry after backoff
+		if retries <= 0 {
+			return nil, join.Err
+		}
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	case ErrMemberIdRequired:
+		// from JoinGroupRequest v4 onwards (due to KIP-394) if the client starts
+		// with an empty member id, it needs to get the assigned id from the
+		// response and send another join request with that id to actually join the
+		// group
+		c.memberID = join.MemberId
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrFencedInstancedId:
+		if c.groupInstanceId != nil {
+			Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId)
+		}
+		return nil, join.Err
+	default:
+		return nil, join.Err
+	}
+
+	var strategy BalanceStrategy
+	var ok bool
+	if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy == nil {
+		strategy, ok = c.findStrategy(join.GroupProtocol, c.config.Consumer.Group.Rebalance.GroupStrategies)
+		if !ok {
+			// this case shouldn't happen in practice, since the leader will choose the protocol
+			// that all the members support
+			return nil, fmt.Errorf("unable to find selected strategy: %s", join.GroupProtocol)
+		}
+	}
+
+	// Prepare distribution plan if we joined as the leader
+	var plan BalanceStrategyPlan
+	var members map[string]ConsumerGroupMemberMetadata
+	var allSubscribedTopicPartitions map[string][]int32
+	var allSubscribedTopics []string
+	if join.LeaderId == join.MemberId {
+		members, err = join.GetMembers()
+		if err != nil {
+			return nil, err
+		}
+
+		allSubscribedTopicPartitions, allSubscribedTopics, plan, err = c.balance(strategy, members)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Sync consumer group
+	syncGroupResponse, err := c.syncGroupRequest(coordinator, members, plan, join.GenerationId, strategy)
+	if consumerGroupSyncTotal != nil {
+		consumerGroupSyncTotal.Inc(1)
+	}
+	if err != nil {
+		_ = coordinator.Close()
+		if consumerGroupSyncFailed != nil {
+			consumerGroupSyncFailed.Inc(1)
+		}
+		return nil, err
+	}
+	if !errors.Is(syncGroupResponse.Err, ErrNoError) {
+		if consumerGroupSyncFailed != nil {
+			consumerGroupSyncFailed.Inc(1)
+		}
+	}
+
+	switch syncGroupResponse.Err {
+	case ErrNoError:
+	case ErrUnknownMemberId, ErrIllegalGeneration:
+		// reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress:
+		// retry after backoff
+		if retries <= 0 {
+			return nil, syncGroupResponse.Err
+		}
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	case ErrFencedInstancedId:
+		if c.groupInstanceId != nil {
+			Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId)
+		}
+		return nil, syncGroupResponse.Err
+	default:
+		return nil, syncGroupResponse.Err
+	}
+
+	// Retrieve and sort claims
+	var claims map[string][]int32
+	if len(syncGroupResponse.MemberAssignment) > 0 {
+		members, err := syncGroupResponse.GetMemberAssignment()
+		if err != nil {
+			return nil, err
+		}
+		claims = members.Topics
+
+		// in the case of stateful balance strategies, hold on to the returned
+		// assignment metadata, otherwise, reset the statically defined consumer
+		// group metadata
+		if members.UserData != nil {
+			c.userData = members.UserData
+		} else {
+			c.userData = c.config.Consumer.Group.Member.UserData
+		}
+
+		for _, partitions := range claims {
+			sort.Sort(int32Slice(partitions))
+		}
+	}
+
+	session, err := newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
+	if err != nil {
+		return nil, err
+	}
+
+	// only the leader needs to check whether there are newly-added partitions in order to trigger a rebalance
+	if join.LeaderId == join.MemberId {
+		go c.loopCheckPartitionNumbers(allSubscribedTopicPartitions, allSubscribedTopics, session)
+	}
+
+	return session, err
+}
+
+func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
+	req := &JoinGroupRequest{
+		GroupId:        c.groupID,
+		MemberId:       c.memberID,
+		SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
+		ProtocolType:   "consumer",
+	}
+	if c.config.Version.IsAtLeast(V0_10_1_0) {
+		req.Version = 1
+		req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
+	}
+	if c.config.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 2
+	}
+	if c.config.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 3
+	}
+	// from JoinGroupRequest v4 onwards (due to KIP-394) the client will actually
+	// send two JoinGroupRequests, once with the empty member id, and then again
+	// with the assigned id from the first response. This is handled via the
+	// ErrMemberIdRequired case.
+	if c.config.Version.IsAtLeast(V2_2_0_0) {
+		req.Version = 4
+	}
+	if c.config.Version.IsAtLeast(V2_3_0_0) {
+		req.Version = 5
+		req.GroupInstanceId = c.groupInstanceId
+		if c.config.Version.IsAtLeast(V2_4_0_0) {
+			req.Version = 6
+		}
+	}
+
+	meta := &ConsumerGroupMemberMetadata{
+		Topics:   topics,
+		UserData: c.userData,
+	}
+	var strategy BalanceStrategy
+	if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy != nil {
+		if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+			return nil, err
+		}
+	} else {
+		for _, strategy = range c.config.Consumer.Group.Rebalance.GroupStrategies {
+			if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	return coordinator.JoinGroup(req)
+}
+
+// findStrategy returns the BalanceStrategy with the specified protocolName
+// from the slice provided.
+func (c *consumerGroup) findStrategy(name string, groupStrategies []BalanceStrategy) (BalanceStrategy, bool) {
+	for _, strategy := range groupStrategies {
+		if strategy.Name() == name {
+			return strategy, true
+		}
+	}
+	return nil, false
+}
+
+func (c *consumerGroup) syncGroupRequest(
+	coordinator *Broker,
+	members map[string]ConsumerGroupMemberMetadata,
+	plan BalanceStrategyPlan,
+	generationID int32,
+	strategy BalanceStrategy,
+) (*SyncGroupResponse, error) {
+	req := &SyncGroupRequest{
+		GroupId:      c.groupID,
+		MemberId:     c.memberID,
+		GenerationId: generationID,
+	}
+
+	// Versions 1 and 2 are the same as version 0.
+	if c.config.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 1
+	}
+	if c.config.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 2
+	}
+	// Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts.
+	if c.config.Version.IsAtLeast(V2_3_0_0) {
+		req.Version = 3
+		req.GroupInstanceId = c.groupInstanceId
+		if c.config.Version.IsAtLeast(V2_4_0_0) {
+			req.Version = 4
+		}
+	}
+
+	for memberID, topics := range plan {
+		assignment := &ConsumerGroupMemberAssignment{Topics: topics}
+		userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
+		if err != nil {
+			return nil, err
+		}
+		assignment.UserData = userDataBytes
+		if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
+			return nil, err
+		}
+		delete(members, memberID)
+	}
+	// add empty assignments for any remaining members
+	for memberID := range members {
+		if err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{}); err != nil {
+			return nil, err
+		}
+	}
+
+	return coordinator.SyncGroup(req)
+}
+
+func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
+	req := &HeartbeatRequest{
+		GroupId:      c.groupID,
+		MemberId:     memberID,
+		GenerationId: generationID,
+	}
+
+	// Version 1 and version 2 are the same as version 0.
+	if c.config.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 1
+	}
+	if c.config.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 2
+	}
+	// Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts.
+	if c.config.Version.IsAtLeast(V2_3_0_0) {
+		req.Version = 3
+		req.GroupInstanceId = c.groupInstanceId
+		// Version 4 is the first flexible version
+		if c.config.Version.IsAtLeast(V2_4_0_0) {
+			req.Version = 4
+		}
+	}
+
+	return coordinator.Heartbeat(req)
+}
+
+func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (map[string][]int32, []string, BalanceStrategyPlan, error) {
+	topicPartitions := make(map[string][]int32)
+	for _, meta := range members {
+		for _, topic := range meta.Topics {
+			topicPartitions[topic] = nil
+		}
+	}
+
+	allSubscribedTopics := make([]string, 0, len(topicPartitions))
+	for topic := range topicPartitions {
+		allSubscribedTopics = append(allSubscribedTopics, topic)
+	}
+
+	// refresh metadata for all the subscribed topics in the consumer group
+	// to avoid using stale metadata when assigning partitions
+	err := c.client.RefreshMetadata(allSubscribedTopics...)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	for topic := range topicPartitions {
+		partitions, err := c.client.Partitions(topic)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		topicPartitions[topic] = partitions
+	}
+
+	plan, err := strategy.Plan(members, topicPartitions)
+	return topicPartitions, allSubscribedTopics, plan, err
+}
+
+// Leaves the cluster, called by Close.
+func (c *consumerGroup) leave() error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.memberID == "" {
+		return nil
+	}
+
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		return err
+	}
+
+	// as per KIP-345, if groupInstanceId is set (i.e. static membership is in use), do not leave the group when the consumer is closed; just clear the memberID
+	if c.groupInstanceId != nil {
+		c.memberID = ""
+		return nil
+	}
+	req := &LeaveGroupRequest{
+		GroupId:  c.groupID,
+		MemberId: c.memberID,
+	}
+	if c.config.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 1
+	}
+	if c.config.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 2
+	}
+	if c.config.Version.IsAtLeast(V2_4_0_0) {
+		req.Version = 4
+		req.Members = append(req.Members, MemberIdentity{
+			MemberId: c.memberID,
+		})
+	}
+
+	resp, err := coordinator.LeaveGroup(req)
+	if err != nil {
+		_ = coordinator.Close()
+		return err
+	}
+
+	// clear the memberID
+	c.memberID = ""
+
+	switch resp.Err {
+	case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
+		return nil
+	default:
+		return resp.Err
+	}
+}
+
+func (c *consumerGroup) handleError(err error, topic string, partition int32) {
+	var consumerError *ConsumerError
+	if ok := errors.As(err, &consumerError); !ok && topic != "" && partition > -1 {
+		err = &ConsumerError{
+			Topic:     topic,
+			Partition: partition,
+			Err:       err,
+		}
+	}
+
+	if !c.config.Consumer.Return.Errors {
+		Logger.Println(err)
+		return
+	}
+
+	c.errorsLock.RLock()
+	defer c.errorsLock.RUnlock()
+	select {
+	case <-c.closed:
+		// consumer is closed
+		return
+	default:
+	}
+
+	select {
+	case c.errors <- err:
+	default:
+		// no error listener
+	}
+}
+
+func (c *consumerGroup) loopCheckPartitionNumbers(allSubscribedTopicPartitions map[string][]int32, topics []string, session *consumerGroupSession) {
+	if c.config.Metadata.RefreshFrequency == time.Duration(0) {
+		return
+	}
+
+	defer session.cancel()
+
+	oldTopicToPartitionNum := make(map[string]int, len(allSubscribedTopicPartitions))
+	for topic, partitions := range allSubscribedTopicPartitions {
+		oldTopicToPartitionNum[topic] = len(partitions)
+	}
+
+	pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
+	defer pause.Stop()
+	for {
+		if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
+			return
+		} else {
+			for topic, num := range oldTopicToPartitionNum {
+				if newTopicToPartitionNum[topic] != num {
+					Logger.Printf(
+						"consumergroup/%s loop check partition number goroutine find partitions in topics %s changed from %d to %d\n",
+						c.groupID, topics, num, newTopicToPartitionNum[topic])
+					return // trigger the end of the session on exit
+				}
+			}
+		}
+		select {
+		case <-pause.C:
+		case <-session.ctx.Done():
+			Logger.Printf(
+				"consumergroup/%s loop check partition number goroutine will exit, topics %s\n",
+				c.groupID, topics)
+			// if the session was closed elsewhere, exit
+			return
+		case <-c.closed:
+			return
+		}
+	}
+}
+
+func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
+	topicToPartitionNum := make(map[string]int, len(topics))
+	for _, topic := range topics {
+		if partitionNum, err := c.client.Partitions(topic); err != nil {
+			Logger.Printf(
+				"consumergroup/%s topic %s get partition number failed due to '%v'\n",
+				c.groupID, topic, err)
+			return nil, err
+		} else {
+			topicToPartitionNum[topic] = len(partitionNum)
+		}
+	}
+	return topicToPartitionNum, nil
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupSession represents a consumer group member session.
+type ConsumerGroupSession interface {
+	// Claims returns information about the claimed partitions by topic.
+	Claims() map[string][]int32
+
+	// MemberID returns the cluster member ID.
+	MemberID() string
+
+	// GenerationID returns the current generation ID.
+	GenerationID() int32
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(topic string, partition int32, offset int64, metadata string)
+
+	// Commit the offset to the backend
+	//
+	// Note: calling Commit performs a blocking synchronous operation.
+	Commit()
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(topic string, partition int32, offset int64, metadata string)
+
+	// MarkMessage marks a message as consumed.
+	MarkMessage(msg *ConsumerMessage, metadata string)
+
+	// Context returns the session context.
+	Context() context.Context
+}
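
A short sketch of the offset-marking convention described above; markNext is a hypothetical helper, not part of this package:

```go
package example

import "github.com/IBM/sarama"

// markNext records progress following the convention above: the offset committed
// is the offset of the next message to read, not the one just handled.
func markNext(sess sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) {
	sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")
	// sess.MarkMessage(msg, "") is an equivalent shorthand that adds the +1 internally.
}
```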
+
+type consumerGroupSession struct {
+	parent       *consumerGroup
+	memberID     string
+	generationID int32
+	handler      ConsumerGroupHandler
+
+	claims  map[string][]int32
+	offsets *offsetManager
+	ctx     context.Context
+	cancel  func()
+
+	waitGroup       sync.WaitGroup
+	releaseOnce     sync.Once
+	hbDying, hbDead chan none
+}
+
+func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
+	// init context
+	ctx, cancel := context.WithCancel(ctx)
+
+	// init offset manager
+	offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client, cancel)
+	if err != nil {
+		return nil, err
+	}
+
+	// init session
+	sess := &consumerGroupSession{
+		parent:       parent,
+		memberID:     memberID,
+		generationID: generationID,
+		handler:      handler,
+		offsets:      offsets,
+		claims:       claims,
+		ctx:          ctx,
+		cancel:       cancel,
+		hbDying:      make(chan none),
+		hbDead:       make(chan none),
+	}
+
+	// start heartbeat loop
+	go sess.heartbeatLoop()
+
+	// create a POM for each claim
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			pom, err := offsets.ManagePartition(topic, partition)
+			if err != nil {
+				_ = sess.release(false)
+				return nil, err
+			}
+
+			// handle POM errors
+			go func(topic string, partition int32) {
+				for err := range pom.Errors() {
+					sess.parent.handleError(err, topic, partition)
+				}
+			}(topic, partition)
+		}
+	}
+
+	// perform setup
+	if err := handler.Setup(sess); err != nil {
+		_ = sess.release(true)
+		return nil, err
+	}
+
+	// start consuming each topic partition in its own goroutine
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			sess.waitGroup.Add(1) // increment wait group before spawning goroutine
+			go func(topic string, partition int32) {
+				defer sess.waitGroup.Done()
+				// cancel the group session as soon as any of the consume calls return
+				defer sess.cancel()
+
+				// if partition not currently readable, wait for it to become readable
+				if sess.parent.client.PartitionNotReadable(topic, partition) {
+					timer := time.NewTimer(5 * time.Second)
+					defer timer.Stop()
+
+					for sess.parent.client.PartitionNotReadable(topic, partition) {
+						select {
+						case <-ctx.Done():
+							return
+						case <-parent.closed:
+							return
+						case <-timer.C:
+							timer.Reset(5 * time.Second)
+						}
+					}
+				}
+
+				// consume a single topic/partition, blocking
+				sess.consume(topic, partition)
+			}(topic, partition)
+		}
+	}
+	return sess, nil
+}
+
+func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
+func (s *consumerGroupSession) MemberID() string           { return s.memberID }
+func (s *consumerGroupSession) GenerationID() int32        { return s.generationID }
+
+func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.MarkOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) Commit() {
+	s.offsets.Commit()
+}
+
+func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.ResetOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
+	s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
+}
+
+func (s *consumerGroupSession) Context() context.Context {
+	return s.ctx
+}
+
+func (s *consumerGroupSession) consume(topic string, partition int32) {
+	// quick exit if rebalance is due
+	select {
+	case <-s.ctx.Done():
+		return
+	case <-s.parent.closed:
+		return
+	default:
+	}
+
+	// get next offset
+	offset := s.parent.config.Consumer.Offsets.Initial
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		offset, _ = pom.NextOffset()
+	}
+
+	// create new claim
+	claim, err := newConsumerGroupClaim(s, topic, partition, offset)
+	if err != nil {
+		s.parent.handleError(err, topic, partition)
+		return
+	}
+
+	// handle errors
+	go func() {
+		for err := range claim.Errors() {
+			s.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	// trigger close when session is done
+	go func() {
+		select {
+		case <-s.ctx.Done():
+		case <-s.parent.closed:
+		}
+		claim.AsyncClose()
+	}()
+
+	// start processing
+	if err := s.handler.ConsumeClaim(s, claim); err != nil {
+		s.parent.handleError(err, topic, partition)
+	}
+
+	// ensure consumer is closed & drained
+	claim.AsyncClose()
+	for _, err := range claim.waitClosed() {
+		s.parent.handleError(err, topic, partition)
+	}
+}
+
+func (s *consumerGroupSession) release(withCleanup bool) (err error) {
+	// signal release, stop heartbeat
+	s.cancel()
+
+	// wait for consumers to exit
+	s.waitGroup.Wait()
+
+	// perform release
+	s.releaseOnce.Do(func() {
+		if withCleanup {
+			if e := s.handler.Cleanup(s); e != nil {
+				s.parent.handleError(e, "", -1)
+				err = e
+			}
+		}
+
+		if e := s.offsets.Close(); e != nil {
+			err = e
+		}
+
+		close(s.hbDying)
+		<-s.hbDead
+	})
+
+	Logger.Printf(
+		"consumergroup/session/%s/%d released\n",
+		s.MemberID(), s.GenerationID())
+
+	return
+}
+
+func (s *consumerGroupSession) heartbeatLoop() {
+	defer close(s.hbDead)
+	defer s.cancel() // trigger the end of the session on exit
+	defer func() {
+		Logger.Printf(
+			"consumergroup/session/%s/%d heartbeat loop stopped\n",
+			s.MemberID(), s.GenerationID())
+	}()
+
+	pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
+	defer pause.Stop()
+
+	retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff)
+	defer retryBackoff.Stop()
+
+	retries := s.parent.config.Metadata.Retry.Max
+	for {
+		coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
+		if err != nil {
+			if retries <= 0 {
+				s.parent.handleError(err, "", -1)
+				return
+			}
+			retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff)
+			select {
+			case <-s.hbDying:
+				return
+			case <-retryBackoff.C:
+				retries--
+			}
+			continue
+		}
+
+		resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
+		if err != nil {
+			_ = coordinator.Close()
+
+			if retries <= 0 {
+				s.parent.handleError(err, "", -1)
+				return
+			}
+
+			retries--
+			continue
+		}
+
+		switch resp.Err {
+		case ErrNoError:
+			retries = s.parent.config.Metadata.Retry.Max
+		case ErrRebalanceInProgress:
+			retries = s.parent.config.Metadata.Retry.Max
+			s.cancel()
+		case ErrUnknownMemberId, ErrIllegalGeneration:
+			return
+		case ErrFencedInstancedId:
+			if s.parent.groupInstanceId != nil {
+				Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *s.parent.groupInstanceId)
+			}
+			s.parent.handleError(resp.Err, "", -1)
+			return
+		default:
+			s.parent.handleError(resp.Err, "", -1)
+			return
+		}
+
+		select {
+		case <-pause.C:
+		case <-s.hbDying:
+			return
+		}
+	}
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
+// It also provides hooks for your consumer group session life-cycle and allows you to
+// trigger logic before or after the consume loop(s).
+//
+// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently;
+// ensure that all state is safely protected against race conditions.
+type ConsumerGroupHandler interface {
+	// Setup is run at the beginning of a new session, before ConsumeClaim.
+	Setup(ConsumerGroupSession) error
+
+	// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+	// but before the offsets are committed for the very last time.
+	Cleanup(ConsumerGroupSession) error
+
+	// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+	// Once the Messages() channel is closed, the Handler must finish its processing
+	// loop and exit.
+	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
+}
+
+// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
+type ConsumerGroupClaim interface {
+	// Topic returns the consumed topic name.
+	Topic() string
+
+	// Partition returns the consumed partition.
+	Partition() int32
+
+	// InitialOffset returns the initial offset that was used as a starting point for this claim.
+	InitialOffset() int64
+
+	// HighWaterMarkOffset returns the high watermark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker. The messages channel will be closed when a new rebalance cycle
+	// is due. You must finish processing and mark offsets within
+	// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
+	// re-assigned to another group member.
+	Messages() <-chan *ConsumerMessage
+}
+
+type consumerGroupClaim struct {
+	topic     string
+	partition int32
+	offset    int64
+	PartitionConsumer
+}
+
+func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
+	pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
+
+	if errors.Is(err, ErrOffsetOutOfRange) && sess.parent.config.Consumer.Group.ResetInvalidOffsets {
+		offset = sess.parent.config.Consumer.Offsets.Initial
+		pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		for err := range pcm.Errors() {
+			sess.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	return &consumerGroupClaim{
+		topic:             topic,
+		partition:         partition,
+		offset:            offset,
+		PartitionConsumer: pcm,
+	}, nil
+}
+
+func (c *consumerGroupClaim) Topic() string        { return c.topic }
+func (c *consumerGroupClaim) Partition() int32     { return c.partition }
+func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
+
+// Drains messages and errors, ensures the claim is fully closed.
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
+	go func() {
+		for range c.Messages() {
+		}
+	}()
+
+	for err := range c.Errors() {
+		errs = append(errs, err)
+	}
+	return
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_group_members.go b/vendor/github.com/IBM/sarama/consumer_group_members.go
new file mode 100644
index 0000000..2d38960
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_group_members.go
@@ -0,0 +1,183 @@
+package sarama
+
+import "errors"
+
+// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json
+type ConsumerGroupMemberMetadata struct {
+	Version         int16
+	Topics          []string
+	UserData        []byte
+	OwnedPartitions []*OwnedPartition
+	GenerationID    int32
+	RackID          *string
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putStringArray(m.Topics); err != nil {
+		return err
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	if m.Version >= 1 {
+		if err := pe.putArrayLength(len(m.OwnedPartitions)); err != nil {
+			return err
+		}
+		for _, op := range m.OwnedPartitions {
+			if err := op.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	if m.Version >= 2 {
+		pe.putInt32(m.GenerationID)
+	}
+
+	if m.Version >= 3 {
+		if err := pe.putNullableString(m.RackID); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	if m.Topics, err = pd.getStringArray(); err != nil {
+		return
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+	if m.Version >= 1 {
+		n, err := pd.getArrayLength()
+		if err != nil {
+			// permit missing data here in case of misbehaving 3rd party
+			// clients who incorrectly marked the member metadata as V1 in
+			// their JoinGroup request
+			if errors.Is(err, ErrInsufficientData) {
+				return nil
+			}
+			return err
+		}
+		if n > 0 {
+			m.OwnedPartitions = make([]*OwnedPartition, n)
+			for i := 0; i < n; i++ {
+				m.OwnedPartitions[i] = &OwnedPartition{}
+				if err := m.OwnedPartitions[i].decode(pd); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	if m.Version >= 2 {
+		if m.GenerationID, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if m.Version >= 3 {
+		if m.RackID, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type OwnedPartition struct {
+	Topic      string
+	Partitions []int32
+}
+
+func (m *OwnedPartition) encode(pe packetEncoder) error {
+	if err := pe.putString(m.Topic); err != nil {
+		return err
+	}
+	if err := pe.putInt32Array(m.Partitions); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (m *OwnedPartition) decode(pd packetDecoder) (err error) {
+	if m.Topic, err = pd.getString(); err != nil {
+		return err
+	}
+	if m.Partitions, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json
+type ConsumerGroupMemberAssignment struct {
+	Version  int16
+	Topics   map[string][]int32
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_metadata_request.go b/vendor/github.com/IBM/sarama/consumer_metadata_request.go
new file mode 100644
index 0000000..85ffb03
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_metadata_request.go
@@ -0,0 +1,55 @@
+package sarama
+
+// ConsumerMetadataRequest is used to look up the coordinator for a consumer group;
+// it is encoded on the wire as a FindCoordinatorRequest.
+type ConsumerMetadataRequest struct {
+	Version       int16
+	ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+	tmp := new(FindCoordinatorRequest)
+	tmp.CoordinatorKey = r.ConsumerGroup
+	tmp.CoordinatorType = CoordinatorGroup
+	tmp.Version = r.Version
+	return tmp.encode(pe)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorRequest)
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+	r.ConsumerGroup = tmp.CoordinatorKey
+	return nil
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+	return apiKeyFindCoordinator
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ConsumerMetadataRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *ConsumerMetadataRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_metadata_response.go b/vendor/github.com/IBM/sarama/consumer_metadata_response.go
new file mode 100644
index 0000000..7fb080b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_metadata_response.go
@@ -0,0 +1,98 @@
+package sarama
+
+import (
+	"net"
+	"strconv"
+)
+
+// ConsumerMetadataResponse holds the response to a consumer group metadata request
+type ConsumerMetadataResponse struct {
+	Version         int16
+	Err             KError
+	Coordinator     *Broker
+	CoordinatorID   int32  // deprecated: use Coordinator.ID()
+	CoordinatorHost string // deprecated: use Coordinator.Addr()
+	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorResponse)
+
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+
+	r.Err = tmp.Err
+
+	r.Coordinator = tmp.Coordinator
+	if tmp.Coordinator == nil {
+		return nil
+	}
+
+	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+	// backwards compatibility
+	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+	if err != nil {
+		return err
+	}
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+	r.CoordinatorID = r.Coordinator.ID()
+	r.CoordinatorHost = host
+	r.CoordinatorPort = int32(port)
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+	if r.Coordinator == nil {
+		r.Coordinator = new(Broker)
+		r.Coordinator.id = r.CoordinatorID
+		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
+	}
+
+	tmp := &FindCoordinatorResponse{
+		Version:     r.Version,
+		Err:         r.Err,
+		Coordinator: r.Coordinator,
+	}
+
+	if err := tmp.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+	return apiKeyFindCoordinator
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ConsumerMetadataResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
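
For context, applications normally reach this coordinator lookup through the high-level Client rather than by encoding these types directly. A hedged sketch (the broker address and group name are illustrative):

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Coordinator issues a FindCoordinator request (the modern form of the
	// ConsumerMetadata request above) and caches the resulting broker.
	broker, err := client.Coordinator("example-group")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("group coordinator: %s (id %d)", broker.Addr(), broker.ID())
}
```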
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/IBM/sarama/control_record.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/control_record.go
rename to vendor/github.com/IBM/sarama/control_record.go
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/IBM/sarama/crc32_field.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/crc32_field.go
rename to vendor/github.com/IBM/sarama/crc32_field.go
diff --git a/vendor/github.com/IBM/sarama/create_partitions_request.go b/vendor/github.com/IBM/sarama/create_partitions_request.go
new file mode 100644
index 0000000..75e2dec
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_partitions_request.go
@@ -0,0 +1,141 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsRequest struct {
+	Version         int16
+	TopicPartitions map[string]*TopicPartition
+	Timeout         time.Duration
+	ValidateOnly    bool
+}
+
+func (c *CreatePartitionsRequest) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
+		return err
+	}
+
+	for topic, partition := range c.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partition.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	pe.putBool(c.ValidateOnly)
+
+	return nil
+}
+
+func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	c.TopicPartitions = make(map[string]*TopicPartition, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitions[topic] = new(TopicPartition)
+		if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if c.ValidateOnly, err = pd.getBool(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsRequest) key() int16 {
+	return apiKeyCreatePartitions
+}
+
+func (r *CreatePartitionsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *CreatePartitionsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *CreatePartitionsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V1_0_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+type TopicPartition struct {
+	Count      int32
+	Assignment [][]int32
+}
+
+func (t *TopicPartition) encode(pe packetEncoder) error {
+	pe.putInt32(t.Count)
+
+	if len(t.Assignment) == 0 {
+		pe.putInt32(-1)
+		return nil
+	}
+
+	if err := pe.putArrayLength(len(t.Assignment)); err != nil {
+		return err
+	}
+
+	for _, assign := range t.Assignment {
+		if err := pe.putInt32Array(assign); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
+	if t.Count, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	n, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if n <= 0 {
+		return nil
+	}
+	t.Assignment = make([][]int32, n)
+
+	for i := 0; i < int(n); i++ {
+		if t.Assignment[i], err = pd.getInt32Array(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
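
This request is normally issued through the ClusterAdmin API rather than built by hand. A hedged sketch of growing a topic's partition count (the address, topic name, and count are illustrative):

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0 // partition creation needs at least Kafka 1.0 (request v0)

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Grow "example-topic" to 6 partitions and let the brokers pick replica
	// placement (a nil assignment encodes as -1, as in TopicPartition.encode above).
	if err := admin.CreatePartitions("example-topic", 6, nil, false); err != nil {
		log.Fatal(err)
	}
}
```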
diff --git a/vendor/github.com/IBM/sarama/create_partitions_response.go b/vendor/github.com/IBM/sarama/create_partitions_response.go
new file mode 100644
index 0000000..0785c35
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_partitions_response.go
@@ -0,0 +1,130 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type CreatePartitionsResponse struct {
+	Version              int16
+	ThrottleTime         time.Duration
+	TopicPartitionErrors map[string]*TopicPartitionError
+}
+
+func (c *CreatePartitionsResponse) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(c.ThrottleTime)
+	if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
+		return err
+	}
+
+	for topic, partitionError := range c.TopicPartitionErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partitionError.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if c.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitionErrors[topic] = new(TopicPartitionError)
+		if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsResponse) key() int16 {
+	return apiKeyCreatePartitions
+}
+
+func (r *CreatePartitionsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *CreatePartitionsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *CreatePartitionsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V1_0_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *CreatePartitionsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+type TopicPartitionError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicPartitionError) Error() string {
+	text := t.Err.Error()
+	if t.ErrMsg != nil {
+		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+	}
+	return text
+}
+
+func (t *TopicPartitionError) Unwrap() error {
+	return t.Err
+}
+
+func (t *TopicPartitionError) encode(pe packetEncoder) error {
+	pe.putKError(t.Err)
+
+	if err := pe.putNullableString(t.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
+	t.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if t.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go
new file mode 100644
index 0000000..5684e27
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_topics_request.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+	"time"
+)
+
+type CreateTopicsRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// TopicDetails contains the topics to create.
+	TopicDetails map[string]*TopicDetail
+	// Timeout contains how long to wait before timing out the request.
+	Timeout time.Duration
+	// ValidateOnly, if true, checks that the topics can be created as specified
+	// without actually creating anything.
+	ValidateOnly bool
+}
+
+func (c *CreateTopicsRequest) setVersion(v int16) {
+	c.Version = v
+}
+
+func NewCreateTopicsRequest(
+	version KafkaVersion,
+	topicDetails map[string]*TopicDetail,
+	timeout time.Duration,
+	validateOnly bool,
+) *CreateTopicsRequest {
+	r := &CreateTopicsRequest{
+		TopicDetails: topicDetails,
+		Timeout:      timeout,
+		ValidateOnly: validateOnly,
+	}
+	switch {
+	case version.IsAtLeast(V2_4_0_0):
+		// Version 5 is the first flexible version
+		// Version 4 makes partitions/replicationFactor optional even when assignments are not present (KIP-464)
+		r.Version = 5
+	case version.IsAtLeast(V2_0_0_0):
+		// Version 3 is the same as version 2 (the broker responds before throttling)
+		r.Version = 3
+	case version.IsAtLeast(V0_11_0_0):
+		// Version 2 is the same as version 1 (response has ThrottleTime)
+		r.Version = 2
+	case version.IsAtLeast(V0_10_2_0):
+		// Version 1 adds validateOnly.
+		r.Version = 1
+	}
+	return r
+}
+
+func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
+		return err
+	}
+	for topic, detail := range c.TopicDetails {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := detail.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	if c.Version >= 1 {
+		pe.putBool(c.ValidateOnly)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicDetails = make(map[string]*TopicDetail, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicDetails[topic] = new(TopicDetail)
+		if err = c.TopicDetails[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if version >= 1 {
+		c.ValidateOnly, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		c.Version = version
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (c *CreateTopicsRequest) key() int16 {
+	return apiKeyCreateTopics
+}
+
+func (c *CreateTopicsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsRequest) headerVersion() int16 {
+	if c.Version >= 5 {
+		return 2
+	}
+	return 1
+}
+
+func (c *CreateTopicsRequest) isFlexible() bool {
+	return c.isFlexibleVersion(c.Version)
+}
+
+func (c *CreateTopicsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 5
+}
+
+func (c *CreateTopicsRequest) isValidVersion() bool {
+	return c.Version >= 0 && c.Version <= 5
+}
+
+func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 5:
+		return V2_4_0_0
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_2_0
+	case 0:
+		return V0_10_1_0
+	default:
+		return V2_8_0_0
+	}
+}
+
+type TopicDetail struct {
+	// NumPartitions contains the number of partitions to create in the topic, or
+	// -1 if we are either specifying a manual partition assignment or using the
+	// default partitions.
+	NumPartitions int32
+	// ReplicationFactor contains the number of replicas to create for each
+	// partition in the topic, or -1 if we are either specifying a manual
+	// partition assignment or using the default replication factor.
+	ReplicationFactor int16
+	// ReplicaAssignment contains the manual partition assignment, or the empty
+	// array if we are using automatic assignment.
+	ReplicaAssignment map[int32][]int32
+	// ConfigEntries contains the custom topic configurations to set.
+	ConfigEntries map[string]*string
+}
+
+func (t *TopicDetail) encode(pe packetEncoder) error {
+	pe.putInt32(t.NumPartitions)
+	pe.putInt16(t.ReplicationFactor)
+
+	if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
+		return err
+	}
+	for partition, assignment := range t.ReplicaAssignment {
+		pe.putInt32(partition)
+		if err := pe.putInt32Array(assignment); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range t.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
+	if t.NumPartitions, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if t.ReplicationFactor, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ReplicaAssignment = make(map[int32][]int32, n)
+		for i := 0; i < n; i++ {
+			replica, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	n, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
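
As with partition creation, topic creation is usually driven through ClusterAdmin rather than by encoding this request directly. A hedged sketch (all names and values are illustrative):

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func strPtr(s string) *string { return &s }

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_4_0_0 // topic creation needs Kafka >= 0.10.1; newer versions enable newer request versions

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	detail := &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 2,
		ConfigEntries: map[string]*string{
			"retention.ms": strPtr("604800000"), // one week, illustrative
		},
	}
	if err := admin.CreateTopic("example-topic", detail, false); err != nil {
		log.Fatal(err)
	}
}
```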
diff --git a/vendor/github.com/IBM/sarama/create_topics_response.go b/vendor/github.com/IBM/sarama/create_topics_response.go
new file mode 100644
index 0000000..f8be692
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_topics_response.go
@@ -0,0 +1,320 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type CreateTopicsResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTime contains the duration for which the request was throttled due
+	// to a quota violation, or zero if the request did not violate any quota.
+	ThrottleTime time.Duration
+	// TopicErrors contains a map of any errors for the topics we tried to create.
+	TopicErrors map[string]*TopicError
+	// TopicResults contains a map of the results for the topics we tried to create.
+	TopicResults map[string]*CreatableTopicResult
+}
+
+func (c *CreateTopicsResponse) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
+	if c.Version >= 2 {
+		pe.putDurationMs(c.ThrottleTime)
+	}
+
+	if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
+		return err
+	}
+	for topic, topicError := range c.TopicErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := topicError.encode(pe, c.Version); err != nil {
+			return err
+		}
+		if c.Version >= 5 {
+			result, ok := c.TopicResults[topic]
+			if !ok {
+				return fmt.Errorf("expected TopicResult for topic, %s, for V5 protocol", topic)
+			}
+			if err := result.encode(pe, c.Version); err != nil {
+				return err
+			}
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+
+	if version >= 2 {
+		if c.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicErrors = make(map[string]*TopicError, n)
+	if version >= 5 {
+		c.TopicResults = make(map[string]*CreatableTopicResult, n)
+	}
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicErrors[topic] = new(TopicError)
+		if err := c.TopicErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+		if version >= 5 {
+			c.TopicResults[topic] = &CreatableTopicResult{}
+			if err := c.TopicResults[topic].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *CreateTopicsResponse) key() int16 {
+	return apiKeyCreateTopics
+}
+
+func (c *CreateTopicsResponse) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsResponse) headerVersion() int16 {
+	if c.Version >= 5 {
+		return 1
+	}
+	return 0
+}
+
+func (c *CreateTopicsResponse) isFlexible() bool {
+	return c.isFlexibleVersion(c.Version)
+}
+
+func (c *CreateTopicsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 5
+}
+
+func (c *CreateTopicsResponse) isValidVersion() bool {
+	return c.Version >= 0 && c.Version <= 5
+}
+
+func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 5:
+		return V2_4_0_0
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_2_0
+	case 0:
+		return V0_10_1_0
+	default:
+		return V2_8_0_0
+	}
+}
+
+func (r *CreateTopicsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+type TopicError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicError) Error() string {
+	text := t.Err.Error()
+	if t.ErrMsg != nil {
+		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+	}
+	return text
+}
+
+func (t *TopicError) Unwrap() error {
+	return t.Err
+}
+
+func (t *TopicError) encode(pe packetEncoder, version int16) error {
+	pe.putKError(t.Err)
+
+	if version >= 1 {
+		if err := pe.putNullableString(t.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
+	t.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		if t.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// CreatableTopicResult contains the per-topic result data returned by V5+
+// CreateTopics responses.
+type CreatableTopicResult struct {
+	// TopicConfigErrorCode contains an optional topic config error, set when
+	// configs are not returned in the response.
+	TopicConfigErrorCode KError
+	// NumPartitions contains the number of partitions of the topic.
+	NumPartitions int32
+	// ReplicationFactor contains the replication factor of the topic.
+	ReplicationFactor int16
+	// Configs contains the configuration of the topic.
+	Configs map[string]*CreatableTopicConfigs
+}
+
+func (r *CreatableTopicResult) encode(pe packetEncoder, version int16) error {
+	pe.putInt32(r.NumPartitions)
+	pe.putInt16(r.ReplicationFactor)
+
+	if err := pe.putArrayLength(len(r.Configs)); err != nil {
+		return err
+	}
+	for name, config := range r.Configs {
+		if err := pe.putString(name); err != nil {
+			return err
+		}
+		if err := config.encode(pe, version); err != nil {
+			return err
+		}
+	}
+	if r.TopicConfigErrorCode == ErrNoError {
+		pe.putEmptyTaggedFieldArray()
+		return nil
+	}
+
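+	// The tagged-field block written by hand below carries a single field:
+	// tag 0, whose value is the int16 TopicConfigErrorCode, hence a field
+	// count of 1 and a value length of 2 bytes.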
+	// TODO: refactor to helper for tagged fields
+	pe.putUVarint(1) // number of tagged fields
+
+	pe.putUVarint(0) // tag
+
+	pe.putUVarint(2) // value length
+
+	pe.putKError(r.TopicConfigErrorCode) // tag value
+
+	return nil
+}
+
+func (r *CreatableTopicResult) decode(pd packetDecoder, version int16) (err error) {
+	r.NumPartitions, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	r.ReplicationFactor, err = pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	r.Configs = make(map[string]*CreatableTopicConfigs, n)
+	for i := 0; i < n; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Configs[name] = &CreatableTopicConfigs{}
+		if err := r.Configs[name].decode(pd, version); err != nil {
+			return err
+		}
+	}
+	err = pd.getTaggedFieldArray(taggedFieldDecoders{
+		0: func(pd packetDecoder) error {
+			r.TopicConfigErrorCode, err = pd.getKError()
+			if err != nil {
+				return err
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// CreatableTopicConfigs contains a configuration entry of the topic.
+type CreatableTopicConfigs struct {
+	// Value contains the configuration value.
+	Value *string
+	// ReadOnly is true if the configuration is read-only.
+	ReadOnly bool
+	// ConfigSource contains the configuration source.
+	ConfigSource ConfigSource
+	// IsSensitive is true if this configuration is sensitive.
+	IsSensitive bool
+}
+
+func (c *CreatableTopicConfigs) encode(pe packetEncoder, version int16) (err error) {
+	if err = pe.putNullableString(c.Value); err != nil {
+		return err
+	}
+	pe.putBool(c.ReadOnly)
+	pe.putInt8(int8(c.ConfigSource))
+	pe.putBool(c.IsSensitive)
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (c *CreatableTopicConfigs) decode(pd packetDecoder, version int16) (err error) {
+	c.Value, err = pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	c.ReadOnly, err = pd.getBool()
+	if err != nil {
+		return err
+	}
+	source, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	c.ConfigSource = ConfigSource(source)
+	c.IsSensitive, err = pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/decompress.go b/vendor/github.com/IBM/sarama/decompress.go
new file mode 100644
index 0000000..0a09983
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/decompress.go
@@ -0,0 +1,98 @@
+package sarama
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	snappy "github.com/eapache/go-xerial-snappy"
+	"github.com/klauspost/compress/gzip"
+	"github.com/pierrec/lz4/v4"
+)
+
+var (
+	lz4ReaderPool = sync.Pool{
+		New: func() interface{} {
+			return lz4.NewReader(nil)
+		},
+	}
+
+	gzipReaderPool sync.Pool
+
+	bufferPool = sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+
+	bytesPool = sync.Pool{
+		New: func() interface{} {
+			res := make([]byte, 0, 4096)
+			return &res
+		},
+	}
+)
+
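+// decompress returns the uncompressed form of data for the given codec. The
+// gzip and lz4 readers and the scratch buffers above are pooled and reused
+// across calls, so each branch copies the result into a fresh slice before
+// returning the pooled objects.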
+func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		var err error
+		reader, ok := gzipReaderPool.Get().(*gzip.Reader)
+		if !ok {
+			reader, err = gzip.NewReader(bytes.NewReader(data))
+		} else {
+			err = reader.Reset(bytes.NewReader(data))
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		buffer := bufferPool.Get().(*bytes.Buffer)
+		_, err = buffer.ReadFrom(reader)
+		// copy the buffer to a new slice with the correct length
+		// reuse gzipReader and buffer
+		gzipReaderPool.Put(reader)
+		res := make([]byte, buffer.Len())
+		copy(res, buffer.Bytes())
+		buffer.Reset()
+		bufferPool.Put(buffer)
+
+		return res, err
+	case CompressionSnappy:
+		return snappy.Decode(data)
+	case CompressionLZ4:
+		reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
+		if !ok {
+			reader = lz4.NewReader(bytes.NewReader(data))
+		} else {
+			reader.Reset(bytes.NewReader(data))
+		}
+		buffer := bufferPool.Get().(*bytes.Buffer)
+		_, err := buffer.ReadFrom(reader)
+		// copy the buffer to a new slice with the correct length
+		// reuse lz4Reader and buffer
+		lz4ReaderPool.Put(reader)
+		res := make([]byte, buffer.Len())
+		copy(res, buffer.Bytes())
+		buffer.Reset()
+		bufferPool.Put(buffer)
+
+		return res, err
+	case CompressionZSTD:
+		buffer := *bytesPool.Get().(*[]byte)
+		var err error
+		buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data)
+		// copy the buffer to a new slice with the correct length and reuse buffer
+		res := make([]byte, len(buffer))
+		copy(res, buffer)
+		buffer = buffer[:0]
+		bytesPool.Put(&buffer)
+
+		return res, err
+	default:
+		return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/delete_groups_request.go b/vendor/github.com/IBM/sarama/delete_groups_request.go
new file mode 100644
index 0000000..e6c1db0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_groups_request.go
@@ -0,0 +1,71 @@
+package sarama
+
+type DeleteGroupsRequest struct {
+	Version int16
+	Groups  []string
+}
+
+func (r *DeleteGroupsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(r.Groups); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	if err != nil {
+		return err
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return
+}
+
+func (r *DeleteGroupsRequest) key() int16 {
+	return apiKeyDeleteGroups
+}
+
+func (r *DeleteGroupsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DeleteGroupsRequest) headerVersion() int16 {
+	if r.Version >= 2 {
+		return 2
+	}
+	return 1
+}
+
+func (r *DeleteGroupsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DeleteGroupsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *DeleteGroupsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V1_1_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *DeleteGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/IBM/sarama/delete_groups_response.go b/vendor/github.com/IBM/sarama/delete_groups_response.go
new file mode 100644
index 0000000..ca683cd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_groups_response.go
@@ -0,0 +1,111 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteGroupsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	GroupErrorCodes map[string]KError
+}
+
+func (r *DeleteGroupsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(r.ThrottleTime)
+
+	if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
+		return err
+	}
+	for groupID, errorCode := range r.GroupErrorCodes {
+		if err := pe.putString(groupID); err != nil {
+			return err
+		}
+		pe.putKError(errorCode)
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		_, err = pd.getEmptyTaggedFieldArray()
+		return err
+	}
+
+	r.GroupErrorCodes = make(map[string]KError, n)
+	for i := 0; i < n; i++ {
+		groupID, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.GroupErrorCodes[groupID], err = pd.getKError()
+		if err != nil {
+			return err
+		}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DeleteGroupsResponse) key() int16 {
+	return apiKeyDeleteGroups
+}
+
+func (r *DeleteGroupsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DeleteGroupsResponse) headerVersion() int16 {
+	if r.Version >= 2 {
+		return 1
+	}
+	return 0
+}
+
+func (r *DeleteGroupsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DeleteGroupsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *DeleteGroupsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V1_1_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *DeleteGroupsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/delete_offsets_request.go b/vendor/github.com/IBM/sarama/delete_offsets_request.go
new file mode 100644
index 0000000..8fd10ea
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_offsets_request.go
@@ -0,0 +1,101 @@
+package sarama
+
+type DeleteOffsetsRequest struct {
+	Version    int16
+	Group      string
+	partitions map[string][]int32
+}
+
+func (r *DeleteOffsetsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DeleteOffsetsRequest) encode(pe packetEncoder) (err error) {
+	err = pe.putString(r.Group)
+	if err != nil {
+		return err
+	}
+
+	if r.partitions == nil {
+		pe.putInt32(0)
+	} else {
+		if err = pe.putArrayLength(len(r.partitions)); err != nil {
+			return err
+		}
+	}
+	for topic, partitions := range r.partitions {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putInt32Array(partitions)
+		if err != nil {
+			return err
+		}
+	}
+	return
+}
+
+func (r *DeleteOffsetsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Group, err = pd.getString()
+	if err != nil {
+		return err
+	}
+	var partitionCount int
+
+	partitionCount, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+		return nil
+	}
+
+	r.partitions = make(map[string][]int32, partitionCount)
+	for i := 0; i < partitionCount; i++ {
+		var topic string
+		topic, err = pd.getString()
+		if err != nil {
+			return err
+		}
+
+		var partitions []int32
+		partitions, err = pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		r.partitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (r *DeleteOffsetsRequest) key() int16 {
+	return apiKeyOffsetDelete
+}
+
+func (r *DeleteOffsetsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DeleteOffsetsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *DeleteOffsetsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *DeleteOffsetsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *DeleteOffsetsRequest) AddPartition(topic string, partitionID int32) {
+	if r.partitions == nil {
+		r.partitions = make(map[string][]int32)
+	}
+
+	r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/vendor/github.com/IBM/sarama/delete_offsets_response.go b/vendor/github.com/IBM/sarama/delete_offsets_response.go
new file mode 100644
index 0000000..64180b1
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_offsets_response.go
@@ -0,0 +1,121 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteOffsetsResponse struct {
+	Version int16
+	// The top-level error code, or 0 if there was no error.
+	ErrorCode    KError
+	ThrottleTime time.Duration
+	// The responses for each partition of the topics.
+	Errors map[string]map[int32]KError
+}
+
+func (r *DeleteOffsetsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DeleteOffsetsResponse) AddError(topic string, partition int32, errorCode KError) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]KError)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		r.Errors[topic] = partitions
+	}
+	partitions[partition] = errorCode
+}
+
+func (r *DeleteOffsetsResponse) encode(pe packetEncoder) error {
+	pe.putKError(r.ErrorCode)
+	pe.putDurationMs(r.ThrottleTime)
+
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, errorCode := range partitions {
+			pe.putInt32(partition)
+			pe.putKError(errorCode)
+		}
+	}
+	return nil
+}
+
+func (r *DeleteOffsetsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil || numTopics == 0 {
+		return err
+	}
+
+	r.Errors = make(map[string]map[int32]KError, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numErrors, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Errors[name] = make(map[int32]KError, numErrors)
+
+		for j := 0; j < numErrors; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			r.Errors[name][id], err = pd.getKError()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *DeleteOffsetsResponse) key() int16 {
+	return apiKeyOffsetDelete
+}
+
+func (r *DeleteOffsetsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DeleteOffsetsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *DeleteOffsetsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *DeleteOffsetsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *DeleteOffsetsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/delete_records_request.go b/vendor/github.com/IBM/sarama/delete_records_request.go
new file mode 100644
index 0000000..05538dd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_records_request.go
@@ -0,0 +1,145 @@
+package sarama
+
+import (
+	"slices"
+	"sort"
+	"time"
+)
+
+// request message format is:
+// [topic] timeout(int32)
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) offset(int64)
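+//
+// Illustrative example (not from the upstream docs): deleting everything
+// below offset 42 on partition 0 of "my-topic" with a 30s timeout would be
+// expressed as
+//
+//	req := &DeleteRecordsRequest{
+//		Version: 1,
+//		Topics: map[string]*DeleteRecordsRequestTopic{
+//			"my-topic": {PartitionOffsets: map[int32]int64{0: 42}},
+//		},
+//		Timeout: 30 * time.Second,
+//	}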
+
+type DeleteRecordsRequest struct {
+	Version int16
+	Topics  map[string]*DeleteRecordsRequestTopic
+	Timeout time.Duration
+}
+
+func (d *DeleteRecordsRequest) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsRequestTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) key() int16 {
+	return apiKeyDeleteRecords
+}
+
+func (d *DeleteRecordsRequest) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteRecordsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (d *DeleteRecordsRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+type DeleteRecordsRequestTopic struct {
+	PartitionOffsets map[int32]int64 // partition => offset
+}
+
+func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.PartitionOffsets))
+	for partition := range t.PartitionOffsets {
+		keys = append(keys, partition)
+	}
+	slices.Sort(keys)
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		pe.putInt64(t.PartitionOffsets[partition])
+	}
+	return nil
+}
+
+func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.PartitionOffsets = make(map[int32]int64, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			offset, err := pd.getInt64()
+			if err != nil {
+				return err
+			}
+			t.PartitionOffsets[partition] = offset
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/delete_records_response.go b/vendor/github.com/IBM/sarama/delete_records_response.go
new file mode 100644
index 0000000..4a41811
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_records_response.go
@@ -0,0 +1,177 @@
+package sarama
+
+import (
+	"slices"
+	"sort"
+	"time"
+)
+
+// response message format is:
+// throttleMs(int32) [topic]
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) low_watermark(int64) error_code(int16)
+
+type DeleteRecordsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Topics       map[string]*DeleteRecordsResponseTopic
+}
+
+func (d *DeleteRecordsResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(d.ThrottleTime)
+
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = version
+
+	if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponseTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteRecordsResponse) key() int16 {
+	return apiKeyDeleteRecords
+}
+
+func (d *DeleteRecordsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteRecordsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (d *DeleteRecordsResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *DeleteRecordsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+type DeleteRecordsResponseTopic struct {
+	Partitions map[int32]*DeleteRecordsResponsePartition
+}
+
+func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.Partitions)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.Partitions))
+	for partition := range t.Partitions {
+		keys = append(keys, partition)
+	}
+	slices.Sort(keys)
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		if err := t.Partitions[partition].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponsePartition)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			t.Partitions[partition] = details
+		}
+	}
+
+	return nil
+}
+
+type DeleteRecordsResponsePartition struct {
+	LowWatermark int64
+	Err          KError
+}
+
+func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
+	pe.putInt64(t.LowWatermark)
+	pe.putKError(t.Err)
+	return nil
+}
+
+func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
+	lowWatermark, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	t.LowWatermark = lowWatermark
+
+	t.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go
new file mode 100644
index 0000000..cb9e924
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_topics_request.go
@@ -0,0 +1,98 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsRequest struct {
+	Version int16
+	Topics  []string
+	Timeout time.Duration
+}
+
+func (d *DeleteTopicsRequest) setVersion(v int16) {
+	d.Version = v
+}
+
+func NewDeleteTopicsRequest(version KafkaVersion, topics []string, timeout time.Duration) *DeleteTopicsRequest {
+	d := &DeleteTopicsRequest{
+		Topics:  topics,
+		Timeout: timeout,
+	}
+	if version.IsAtLeast(V2_4_0_0) {
+		d.Version = 4
+	} else if version.IsAtLeast(V2_1_0_0) {
+		d.Version = 3
+	} else if version.IsAtLeast(V2_0_0_0) {
+		d.Version = 2
+	} else if version.IsAtLeast(V0_11_0_0) {
+		d.Version = 1
+	}
+	return d
+}
+
+func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(d.Topics); err != nil {
+		return err
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	if d.Topics, err = pd.getStringArray(); err != nil {
+		return err
+	}
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+	d.Version = version
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DeleteTopicsRequest) key() int16 {
+	return apiKeyDeleteTopics
+}
+
+func (d *DeleteTopicsRequest) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsRequest) headerVersion() int16 {
+	if d.Version >= 4 {
+		return 2
+	}
+	return 1
+}
+
+func (d *DeleteTopicsRequest) isFlexible() bool {
+	return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DeleteTopicsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (d *DeleteTopicsRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 4
+}
+
+func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_1_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_1_0
+	default:
+		return V2_2_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/delete_topics_response.go b/vendor/github.com/IBM/sarama/delete_topics_response.go
new file mode 100644
index 0000000..4b53f0b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_topics_response.go
@@ -0,0 +1,118 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteTopicsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	TopicErrorCodes map[string]KError
+}
+
+func (d *DeleteTopicsResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
+	if d.Version >= 1 {
+		pe.putDurationMs(d.ThrottleTime)
+	}
+
+	if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
+		return err
+	}
+	for topic, errorCode := range d.TopicErrorCodes {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		pe.putKError(errorCode)
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+
+		d.Version = version
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.TopicErrorCodes = make(map[string]KError, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		d.TopicErrorCodes[topic], err = pd.getKError()
+		if err != nil {
+			return err
+		}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DeleteTopicsResponse) key() int16 {
+	return apiKeyDeleteTopics
+}
+
+func (d *DeleteTopicsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsResponse) headerVersion() int16 {
+	if d.Version >= 4 {
+		return 1
+	}
+	return 0
+}
+
+func (d *DeleteTopicsResponse) isFlexible() bool {
+	return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DeleteTopicsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (d *DeleteTopicsResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 4
+}
+
+func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_1_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_1_0
+	default:
+		return V2_2_0_0
+	}
+}
+
+func (r *DeleteTopicsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/describe_client_quotas_request.go b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go
new file mode 100644
index 0000000..120ed33
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go
@@ -0,0 +1,194 @@
+package sarama
+
+// DescribeClientQuotas Request (Version: 0) => [components] strict
+//   components => entity_type match_type match
+//     entity_type => STRING
+//     match_type => INT8
+//     match => NULLABLE_STRING
+//   strict => BOOLEAN
+// DescribeClientQuotas Request (Version: 1) => [components] strict _tagged_fields
+//   components => entity_type match_type match _tagged_fields
+//     entity_type => COMPACT_STRING
+//     match_type => INT8
+//     match => COMPACT_NULLABLE_STRING
+//   strict => BOOLEAN
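+//
+// Illustrative example (not from the upstream docs): a strict version-1
+// request that matches the default "user" quota entity would be built as
+//
+//	req := NewDescribeClientQuotasRequest(V2_8_0_0, []QuotaFilterComponent{
+//		{EntityType: QuotaEntityType("user"), MatchType: QuotaMatchDefault},
+//	}, true)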
+
+// DescribeClientQuotasRequest contains a filter to be applied to matching
+// client quotas.
+// Components: the components to filter on
+// Strict: whether the filter only includes specified components
+type DescribeClientQuotasRequest struct {
+	Version    int16
+	Components []QuotaFilterComponent
+	Strict     bool
+}
+
+func NewDescribeClientQuotasRequest(version KafkaVersion, components []QuotaFilterComponent, strict bool) *DescribeClientQuotasRequest {
+	d := &DescribeClientQuotasRequest{
+		Components: components,
+		Strict:     strict,
+	}
+	if version.IsAtLeast(V2_8_0_0) {
+		d.Version = 1
+	}
+	return d
+}
+
+func (d *DescribeClientQuotasRequest) setVersion(v int16) {
+	d.Version = v
+}
+
+// QuotaFilterComponent describes a component for applying a client quota filter.
+// EntityType: the entity type the filter component applies to ("user", "client-id", "ip")
+// MatchType: the match type of the filter component (any, exact, default)
+// Match: the name that's matched exactly (used when MatchType is QuotaMatchExact)
+type QuotaFilterComponent struct {
+	EntityType QuotaEntityType
+	MatchType  QuotaMatchType
+	Match      string
+}
+
+func (d *DescribeClientQuotasRequest) encode(pe packetEncoder) error {
+	// Components
+	if err := pe.putArrayLength(len(d.Components)); err != nil {
+		return err
+	}
+	for _, c := range d.Components {
+		if err := c.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	// Strict
+	pe.putBool(d.Strict)
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (d *DescribeClientQuotasRequest) decode(pd packetDecoder, version int16) error {
+	// Components
+	componentCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if componentCount > 0 {
+		d.Components = make([]QuotaFilterComponent, componentCount)
+		for i := range d.Components {
+			c := QuotaFilterComponent{}
+			if err = c.decode(pd, version); err != nil {
+				return err
+			}
+			d.Components[i] = c
+		}
+	} else {
+		d.Components = []QuotaFilterComponent{}
+	}
+
+	// Strict
+	strict, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	d.Strict = strict
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *QuotaFilterComponent) encode(pe packetEncoder) error {
+	// EntityType
+	if err := pe.putString(string(d.EntityType)); err != nil {
+		return err
+	}
+
+	// MatchType
+	pe.putInt8(int8(d.MatchType))
+
+	// Match
+	if d.MatchType == QuotaMatchAny {
+		if err := pe.putNullableString(nil); err != nil {
+			return err
+		}
+	} else if d.MatchType == QuotaMatchDefault {
+		if err := pe.putNullableString(nil); err != nil {
+			return err
+		}
+	} else {
+		if err := pe.putString(d.Match); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (d *QuotaFilterComponent) decode(pd packetDecoder, version int16) error {
+	// EntityType
+	entityType, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	d.EntityType = QuotaEntityType(entityType)
+
+	// MatchType
+	matchType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	d.MatchType = QuotaMatchType(matchType)
+
+	// Match
+	match, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	if match != nil {
+		d.Match = *match
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DescribeClientQuotasRequest) key() int16 {
+	return apiKeyDescribeClientQuotas
+}
+
+func (d *DescribeClientQuotasRequest) version() int16 {
+	return d.Version
+}
+
+func (d *DescribeClientQuotasRequest) headerVersion() int16 {
+	if d.Version >= 1 {
+		return 2
+	}
+
+	return 1
+}
+
+func (d *DescribeClientQuotasRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeClientQuotasRequest) isFlexible() bool {
+	return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DescribeClientQuotasRequest) isFlexibleVersion(version int16) bool {
+	return version >= 1
+}
+
+func (d *DescribeClientQuotasRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_8_0_0
+	case 0:
+		return V2_6_0_0
+	default:
+		return V2_8_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/describe_client_quotas_response.go b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go
new file mode 100644
index 0000000..4cf9e5b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go
@@ -0,0 +1,285 @@
+package sarama
+
+import (
+	"time"
+)
+
+// DescribeClientQuotas Response (Version: 0) => throttle_time_ms error_code error_message [entries]
+//   throttle_time_ms => INT32
+//   error_code => INT16
+//   error_message => NULLABLE_STRING
+//   entries => [entity] [values]
+//     entity => entity_type entity_name
+//       entity_type => STRING
+//       entity_name => NULLABLE_STRING
+//     values => key value
+//       key => STRING
+//       value => FLOAT64
+// DescribeClientQuotas Response (Version: 1) => throttle_time_ms error_code error_message [entries] _tagged_fields
+//   throttle_time_ms => INT32
+//   error_code => INT16
+//   error_message => COMPACT_NULLABLE_STRING
+//   entries => [entity] [values] _tagged_fields
+//     entity => entity_type entity_name _tagged_fields
+//       entity_type => COMPACT_STRING
+//       entity_name => COMPACT_NULLABLE_STRING
+//     values => key value _tagged_fields
+//       key => COMPACT_STRING
+//       value => FLOAT64
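+//
+// Illustrative example (not from the upstream docs), assuming a decoded
+// *DescribeClientQuotasResponse named resp and the fmt package: the quota
+// values for each matched entity can be read back with
+//
+//	for _, entry := range resp.Entries {
+//		for key, value := range entry.Values {
+//			fmt.Printf("%v => %s=%f\n", entry.Entity, key, value)
+//		}
+//	}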
+
+type DescribeClientQuotasResponse struct {
+	Version      int16
+	ThrottleTime time.Duration               // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+	ErrorCode    KError                      // The error code, or `0` if the quota description succeeded.
+	ErrorMsg     *string                     // The error message, or `null` if the quota description succeeded.
+	Entries      []DescribeClientQuotasEntry // A result entry.
+}
+
+func (d *DescribeClientQuotasResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+type DescribeClientQuotasEntry struct {
+	Entity []QuotaEntityComponent // The quota entity description.
+	Values map[string]float64     // The quota values for the entity.
+}
+
+type QuotaEntityComponent struct {
+	EntityType QuotaEntityType
+	MatchType  QuotaMatchType
+	Name       string
+}
+
+func (d *DescribeClientQuotasResponse) encode(pe packetEncoder) error {
+	// ThrottleTime
+	pe.putDurationMs(d.ThrottleTime)
+
+	// ErrorCode
+	pe.putKError(d.ErrorCode)
+
+	// ErrorMsg
+	if err := pe.putNullableString(d.ErrorMsg); err != nil {
+		return err
+	}
+
+	// Entries
+	if err := pe.putArrayLength(len(d.Entries)); err != nil {
+		return err
+	}
+	for _, e := range d.Entries {
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (d *DescribeClientQuotasResponse) decode(pd packetDecoder, version int16) (err error) {
+	if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	// ErrorCode
+	d.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	// ErrorMsg
+	errMsg, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	d.ErrorMsg = errMsg
+
+	// Entries
+	entryCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if entryCount > 0 {
+		d.Entries = make([]DescribeClientQuotasEntry, entryCount)
+		for i := range d.Entries {
+			e := DescribeClientQuotasEntry{}
+			if err = e.decode(pd, version); err != nil {
+				return err
+			}
+			d.Entries[i] = e
+		}
+	} else {
+		d.Entries = []DescribeClientQuotasEntry{}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DescribeClientQuotasEntry) encode(pe packetEncoder) error {
+	// Entity
+	if err := pe.putArrayLength(len(d.Entity)); err != nil {
+		return err
+	}
+	for _, e := range d.Entity {
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	// Values
+	if err := pe.putArrayLength(len(d.Values)); err != nil {
+		return err
+	}
+	for key, value := range d.Values {
+		// key
+		if err := pe.putString(key); err != nil {
+			return err
+		}
+		// value
+		pe.putFloat64(value)
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (d *DescribeClientQuotasEntry) decode(pd packetDecoder, version int16) error {
+	// Entity
+	componentCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if componentCount > 0 {
+		d.Entity = make([]QuotaEntityComponent, componentCount)
+		for i := 0; i < componentCount; i++ {
+			component := QuotaEntityComponent{}
+			if err := component.decode(pd, version); err != nil {
+				return err
+			}
+			d.Entity[i] = component
+		}
+	} else {
+		d.Entity = []QuotaEntityComponent{}
+	}
+
+	// Values
+	valueCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if valueCount > 0 {
+		d.Values = make(map[string]float64, valueCount)
+		for i := 0; i < valueCount; i++ {
+			// key
+			key, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			// value
+			value, err := pd.getFloat64()
+			if err != nil {
+				return err
+			}
+			d.Values[key] = value
+			_, err = pd.getEmptyTaggedFieldArray()
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		d.Values = map[string]float64{}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (c *QuotaEntityComponent) encode(pe packetEncoder) error {
+	// entity_type
+	if err := pe.putString(string(c.EntityType)); err != nil {
+		return err
+	}
+	// entity_name
+	if c.MatchType == QuotaMatchDefault {
+		if err := pe.putNullableString(nil); err != nil {
+			return err
+		}
+	} else {
+		if err := pe.putString(c.Name); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (c *QuotaEntityComponent) decode(pd packetDecoder, version int16) error {
+	// entity_type
+	entityType, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.EntityType = QuotaEntityType(entityType)
+
+	// entity_name
+	entityName, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+
+	if entityName == nil {
+		c.MatchType = QuotaMatchDefault
+	} else {
+		c.MatchType = QuotaMatchExact
+		c.Name = *entityName
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DescribeClientQuotasResponse) key() int16 {
+	return apiKeyDescribeClientQuotas
+}
+
+func (d *DescribeClientQuotasResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DescribeClientQuotasResponse) headerVersion() int16 {
+	if d.Version >= 1 {
+		return 1
+	}
+
+	return 0
+}
+
+func (d *DescribeClientQuotasResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeClientQuotasResponse) isFlexible() bool {
+	return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DescribeClientQuotasResponse) isFlexibleVersion(version int16) bool {
+	return version >= 1
+}
+
+func (d *DescribeClientQuotasResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_8_0_0
+	case 0:
+		return V2_6_0_0
+	default:
+		return V2_8_0_0
+	}
+}
+
+func (r *DescribeClientQuotasResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/describe_configs_request.go b/vendor/github.com/IBM/sarama/describe_configs_request.go
new file mode 100644
index 0000000..d42e271
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_configs_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type DescribeConfigsRequest struct {
+	Version         int16
+	Resources       []*ConfigResource
+	IncludeSynonyms bool
+}
+
+func (r *DescribeConfigsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
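+// ConfigResource identifies a single resource whose configuration is being
+// requested; leaving ConfigNames empty requests all configuration entries
+// for the resource (encoded as a length of -1 on the wire).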
+type ConfigResource struct {
+	Type        ConfigResourceType
+	Name        string
+	ConfigNames []string
+}
+
+func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		pe.putInt8(int8(c.Type))
+		if err := pe.putString(c.Name); err != nil {
+			return err
+		}
+
+		if len(c.ConfigNames) == 0 {
+			pe.putInt32(-1)
+			continue
+		}
+		if err := pe.putStringArray(c.ConfigNames); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putBool(r.IncludeSynonyms)
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ConfigResource, n)
+
+	for i := 0; i < n; i++ {
+		r.Resources[i] = &ConfigResource{}
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Type = ConfigResourceType(t)
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Name = name
+
+		confLength, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if confLength == -1 {
+			continue
+		}
+
+		cfnames := make([]string, confLength)
+		for i := 0; i < confLength; i++ {
+			s, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			cfnames[i] = s
+		}
+		r.Resources[i].ConfigNames = cfnames
+	}
+	r.Version = version
+	if r.Version >= 1 {
+		b, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.IncludeSynonyms = b
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) key() int16 {
+	return apiKeyDescribeConfigs
+}
+
+func (r *DescribeConfigsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *DescribeConfigsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V1_1_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_0_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/describe_configs_response.go b/vendor/github.com/IBM/sarama/describe_configs_response.go
new file mode 100644
index 0000000..c702f02
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_configs_response.go
@@ -0,0 +1,352 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
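+// ConfigSource identifies where a configuration entry's value comes from
+// (see KIP-226, dynamic broker configuration); the constants below mirror
+// the values returned by DescribeConfigs versions 1 and up.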
+type ConfigSource int8
+
+func (s ConfigSource) String() string {
+	switch s {
+	case SourceUnknown:
+		return "Unknown"
+	case SourceTopic:
+		return "Topic"
+	case SourceDynamicBroker:
+		return "DynamicBroker"
+	case SourceDynamicDefaultBroker:
+		return "DynamicDefaultBroker"
+	case SourceStaticBroker:
+		return "StaticBroker"
+	case SourceDefault:
+		return "Default"
+	}
+	return fmt.Sprintf("Source Invalid: %d", int(s))
+}
+
+const (
+	SourceUnknown ConfigSource = iota
+	SourceTopic
+	SourceDynamicBroker
+	SourceDynamicDefaultBroker
+	SourceStaticBroker
+	SourceDefault
+)
+
+type DescribeConfigError struct {
+	Err    KError
+	ErrMsg string
+}
+
+func (c *DescribeConfigError) Error() string {
+	text := c.Err.Error()
+	if c.ErrMsg != "" {
+		text = fmt.Sprintf("%s - %s", text, c.ErrMsg)
+	}
+	return text
+}
+
+type DescribeConfigsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Resources    []*ResourceResponse
+}
+
+func (r *DescribeConfigsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type ResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+	Configs   []*ConfigEntry
+}
+
+type ConfigEntry struct {
+	Name      string
+	Value     string
+	ReadOnly  bool
+	Default   bool
+	Source    ConfigSource
+	Sensitive bool
+	Synonyms  []*ConfigSynonym
+}
+
+type ConfigSynonym struct {
+	ConfigName  string
+	ConfigValue string
+	Source      ConfigSource
+}
+
+func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
+	pe.putDurationMs(r.ThrottleTime)
+	if err = pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		if err = c.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ResourceResponse, n)
+	for i := 0; i < n; i++ {
+		rr := &ResourceResponse{}
+		if err := rr.decode(pd, version); err != nil {
+			return err
+		}
+		r.Resources[i] = rr
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) key() int16 {
+	return apiKeyDescribeConfigs
+}
+
+func (r *DescribeConfigsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *DescribeConfigsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V1_1_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *DescribeConfigsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(r.ErrorCode)
+
+	if err = pe.putString(r.ErrorMsg); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(r.Type))
+
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putArrayLength(len(r.Configs)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Configs {
+		if err = c.encode(pe, version); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
+	ec, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = ec
+
+	em, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.ErrorMsg = em
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Configs = make([]*ConfigEntry, n)
+	for i := 0; i < n; i++ {
+		c := &ConfigEntry{}
+		if err := c.decode(pd, version); err != nil {
+			return err
+		}
+		r.Configs[i] = c
+	}
+	return nil
+}
+
+func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putString(r.Value); err != nil {
+		return err
+	}
+
+	pe.putBool(r.ReadOnly)
+
+	if version <= 0 {
+		pe.putBool(r.Default)
+		pe.putBool(r.Sensitive)
+	} else {
+		pe.putInt8(int8(r.Source))
+		pe.putBool(r.Sensitive)
+
+		if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
+			return err
+		}
+		for _, c := range r.Synonyms {
+			if err = c.encode(pe, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+	if version == 0 {
+		r.Source = SourceUnknown
+	}
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Value = value
+
+	read, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.ReadOnly = read
+
+	if version == 0 {
+		defaultB, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.Default = defaultB
+		if defaultB {
+			r.Source = SourceDefault
+		}
+	} else {
+		source, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Source = ConfigSource(source)
+		r.Default = r.Source == SourceDefault
+	}
+
+	sensitive, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.Sensitive = sensitive
+
+	if version > 0 {
+		n, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Synonyms = make([]*ConfigSynonym, n)
+
+		for i := 0; i < n; i++ {
+			s := &ConfigSynonym{}
+			if err := s.decode(pd, version); err != nil {
+				return err
+			}
+			r.Synonyms[i] = s
+		}
+	}
+	return nil
+}
+
+func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
+	err = pe.putString(c.ConfigName)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putString(c.ConfigValue)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(c.Source))
+
+	return nil
+}
+
+func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigName = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigValue = value
+
+	source, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	c.Source = ConfigSource(source)
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/describe_groups_request.go b/vendor/github.com/IBM/sarama/describe_groups_request.go
new file mode 100644
index 0000000..b2fb07e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_groups_request.go
@@ -0,0 +1,87 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+	Version                     int16
+	Groups                      []string
+	IncludeAuthorizedOperations bool
+}
+
+func (r *DescribeGroupsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(r.Groups); err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		pe.putBool(r.IncludeAuthorizedOperations)
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	r.Groups, err = pd.getStringArray()
+	if err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		if r.IncludeAuthorizedOperations, err = pd.getBool(); err != nil {
+			return err
+		}
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+	return apiKeyDescribeGroups
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeGroupsRequest) headerVersion() int16 {
+	if r.Version >= 5 {
+		return 2
+	}
+	return 1
+}
+
+func (r *DescribeGroupsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *DescribeGroupsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeGroupsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 5
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V2_4_0_0
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/IBM/sarama/describe_groups_response.go b/vendor/github.com/IBM/sarama/describe_groups_response.go
new file mode 100644
index 0000000..dcc274d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_groups_response.go
@@ -0,0 +1,306 @@
+package sarama
+
+import "time"
+
+type DescribeGroupsResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTimeMs contains the duration in milliseconds for which the
+	// request was throttled due to a quota violation, or zero if the request
+	// did not violate any quota.
+	ThrottleTimeMs int32
+	// Groups contains each described group.
+	Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+
+	for _, block := range r.Groups {
+		if err := block.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if numGroups, err := pd.getArrayLength(); err != nil {
+		return err
+	} else if numGroups > 0 {
+		r.Groups = make([]*GroupDescription, numGroups)
+		for i := 0; i < numGroups; i++ {
+			block := &GroupDescription{}
+			if err := block.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.Groups[i] = block
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+	return apiKeyDescribeGroups
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeGroupsResponse) headerVersion() int16 {
+	if r.Version >= 5 {
+		return 1
+	}
+	return 0
+}
+
+func (r *DescribeGroupsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *DescribeGroupsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeGroupsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 5
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V2_4_0_0
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *DescribeGroupsResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// GroupDescription describes a single consumer group.
+type GroupDescription struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// Err contains the describe error as the KError type.
+	Err KError
+	// ErrorCode contains the describe error, or 0 if there was no error.
+	ErrorCode int16
+	// GroupId contains the group ID string.
+	GroupId string
+	// State contains the group state string, or the empty string.
+	State string
+	// ProtocolType contains the group protocol type, or the empty string.
+	ProtocolType string
+	// Protocol contains the group protocol data, or the empty string.
+	Protocol string
+	// Members contains the group members.
+	Members map[string]*GroupMemberDescription
+	// AuthorizedOperations contains a 32-bit bitfield to represent authorized
+	// operations for this group.
+	AuthorizedOperations int32
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder, version int16) (err error) {
+	gd.Version = version
+	pe.putInt16(gd.ErrorCode)
+
+	if err := pe.putString(gd.GroupId); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.State); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.ProtocolType); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.Protocol); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(gd.Members)); err != nil {
+		return err
+	}
+
+	for _, block := range gd.Members {
+		if err := block.encode(pe, gd.Version); err != nil {
+			return err
+		}
+	}
+
+	if gd.Version >= 3 {
+		pe.putInt32(gd.AuthorizedOperations)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder, version int16) (err error) {
+	gd.Version = version
+	if gd.ErrorCode, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	gd.Err = KError(gd.ErrorCode)
+
+	if gd.GroupId, err = pd.getString(); err != nil {
+		return err
+	}
+	if gd.State, err = pd.getString(); err != nil {
+		return err
+	}
+	if gd.ProtocolType, err = pd.getString(); err != nil {
+		return err
+	}
+	if gd.Protocol, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if numMembers, err := pd.getArrayLength(); err != nil {
+		return err
+	} else if numMembers > 0 {
+		gd.Members = make(map[string]*GroupMemberDescription, numMembers)
+		for i := 0; i < numMembers; i++ {
+			block := &GroupMemberDescription{}
+			if err := block.decode(pd, gd.Version); err != nil {
+				return err
+			}
+			gd.Members[block.MemberId] = block
+		}
+	}
+
+	if gd.Version >= 3 {
+		if gd.AuthorizedOperations, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+// GroupMemberDescription describes a single member of a consumer group.
+type GroupMemberDescription struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// MemberId contains the member ID assigned by the group coordinator.
+	MemberId string
+	// GroupInstanceId contains the unique identifier of the consumer instance
+	// provided by end user.
+	GroupInstanceId *string
+	// ClientId contains the client ID used in the member's latest join group
+	// request.
+	ClientId string
+	// ClientHost contains the client host.
+	ClientHost string
+	// MemberMetadata contains the metadata corresponding to the current group
+	// protocol in use.
+	MemberMetadata []byte
+	// MemberAssignment contains the current assignment provided by the group
+	// leader.
+	MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder, version int16) (err error) {
+	gmd.Version = version
+	if err := pe.putString(gmd.MemberId); err != nil {
+		return err
+	}
+	if gmd.Version >= 4 {
+		if err := pe.putNullableString(gmd.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+	if err := pe.putString(gmd.ClientId); err != nil {
+		return err
+	}
+	if err := pe.putString(gmd.ClientHost); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder, version int16) (err error) {
+	gmd.Version = version
+	if gmd.MemberId, err = pd.getString(); err != nil {
+		return err
+	}
+	if gmd.Version >= 4 {
+		if gmd.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+	if gmd.ClientId, err = pd.getString(); err != nil {
+		return err
+	}
+	if gmd.ClientHost, err = pd.getString(); err != nil {
+		return err
+	}
+	if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+		return err
+	}
+	if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	if len(gmd.MemberAssignment) == 0 {
+		return nil, nil
+	}
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(gmd.MemberAssignment, assignment, nil)
+	return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+	if len(gmd.MemberMetadata) == 0 {
+		return nil, nil
+	}
+	metadata := new(ConsumerGroupMemberMetadata)
+	err := decode(gmd.MemberMetadata, metadata, nil)
+	return metadata, err
+}
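The per-member Metadata and Assignment blobs above are opaque bytes until decoded with the two getters. A hedged helper sketch follows; the package and function names are hypothetical, and it assumes the ConsumerGroupMemberMetadata/ConsumerGroupMemberAssignment types expose Topics fields as in upstream sarama:

package groupinfo

import (
	"fmt"

	"github.com/IBM/sarama"
)

// PrintAssignments decodes the opaque per-member blobs of one described group.
// Both getters return (nil, nil) when the corresponding byte slice is empty.
func PrintAssignments(group *sarama.GroupDescription) error {
	for id, m := range group.Members {
		meta, err := m.GetMemberMetadata()
		if err != nil {
			return fmt.Errorf("decode metadata for %s: %w", id, err)
		}
		assignment, err := m.GetMemberAssignment()
		if err != nil {
			return fmt.Errorf("decode assignment for %s: %w", id, err)
		}
		if meta != nil {
			fmt.Printf("%s (client %s) subscribes to %v\n", id, m.ClientId, meta.Topics)
		}
		if assignment != nil {
			fmt.Printf("%s owns partitions %v\n", id, assignment.Topics)
		}
	}
	return nil
}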
diff --git a/vendor/github.com/IBM/sarama/describe_log_dirs_request.go b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go
new file mode 100644
index 0000000..32354f6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go
@@ -0,0 +1,124 @@
+package sarama
+
+// DescribeLogDirsRequest is a describe request to get partitions' log size
+type DescribeLogDirsRequest struct {
+	// Versions 0 and 1 are identical on the wire.
+	// The version number was bumped to indicate that, on quota violation, brokers send out responses before throttling.
+	Version int16
+
+	// If this is an empty array, all topics will be queried
+	DescribeTopics []DescribeLogDirsRequestTopic
+}
+
+func (r *DescribeLogDirsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
+type DescribeLogDirsRequestTopic struct {
+	Topic        string
+	PartitionIDs []int32
+}
+
+func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
+	length := len(r.DescribeTopics)
+	if length == 0 {
+		// In order to query all topics we must send null
+		length = -1
+	}
+	if err := pe.putArrayLength(length); err != nil {
+		return err
+	}
+
+	for _, d := range r.DescribeTopics {
+		if err := pe.putString(d.Topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(d.PartitionIDs); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n == -1 {
+		n = 0
+	}
+
+	topics := make([]DescribeLogDirsRequestTopic, n)
+	for i := 0; i < n; i++ {
+		topics[i] = DescribeLogDirsRequestTopic{}
+
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		topics[i].Topic = topic
+
+		pIDs, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		topics[i].PartitionIDs = pIDs
+		_, err = pd.getEmptyTaggedFieldArray()
+		if err != nil {
+			return err
+		}
+	}
+	r.DescribeTopics = topics
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeLogDirsRequest) key() int16 {
+	return apiKeyDescribeLogDirs
+}
+
+func (r *DescribeLogDirsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsRequest) headerVersion() int16 {
+	if r.Version >= 2 {
+		return 2
+	}
+	return 1
+}
+
+func (r *DescribeLogDirsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *DescribeLogDirsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeLogDirsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V3_3_0_0
+	case 3:
+		return V3_2_0_0
+	case 2:
+		return V2_6_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V1_0_0_0
+	}
+}
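Worth noting from the encode method above: an empty DescribeTopics slice is sent as a null array, which asks the broker to describe every topic it hosts. A minimal sketch building both variants (illustrative only, not part of the vendored file; "events" is a made-up topic name):

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// Null topic array on the wire: describe every topic hosted by the broker.
	all := &sarama.DescribeLogDirsRequest{Version: 1}

	// Scoped to two partitions of a single topic instead.
	scoped := &sarama.DescribeLogDirsRequest{
		Version: 1,
		DescribeTopics: []sarama.DescribeLogDirsRequestTopic{
			{Topic: "events", PartitionIDs: []int32{0, 1}},
		},
	}
	fmt.Println(len(all.DescribeTopics), len(scoped.DescribeTopics))
}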
diff --git a/vendor/github.com/IBM/sarama/describe_log_dirs_response.go b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go
new file mode 100644
index 0000000..ebae414
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go
@@ -0,0 +1,306 @@
+package sarama
+
+import "time"
+
+type DescribeLogDirsResponse struct {
+	ThrottleTime time.Duration
+
+	// Versions 0 and 1 are identical on the wire.
+	// The version number was bumped to indicate that, on quota violation, brokers send out responses before throttling.
+	Version int16
+
+	LogDirs []DescribeLogDirsResponseDirMetadata
+
+	ErrorCode KError
+}
+
+func (r *DescribeLogDirsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(r.ThrottleTime)
+
+	if r.Version >= 3 {
+		pe.putKError(r.ErrorCode)
+	}
+
+	if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
+		return err
+	}
+
+	for _, dir := range r.LogDirs {
+		if err := dir.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	if version >= 3 {
+		r.ErrorCode, err = pd.getKError()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Decode array of DescribeLogDirsResponseDirMetadata
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
+	for i := 0; i < n; i++ {
+		dir := DescribeLogDirsResponseDirMetadata{}
+		if err := dir.decode(pd, version); err != nil {
+			return err
+		}
+		r.LogDirs[i] = dir
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeLogDirsResponse) key() int16 {
+	return apiKeyDescribeLogDirs
+}
+
+func (r *DescribeLogDirsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsResponse) headerVersion() int16 {
+	if r.Version >= 2 {
+		return 1
+	}
+	return 0
+}
+
+func (r *DescribeLogDirsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *DescribeLogDirsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeLogDirsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V3_3_0_0
+	case 3:
+		return V3_2_0_0
+	case 2:
+		return V2_6_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V1_0_0_0
+	}
+}
+
+func (r *DescribeLogDirsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+type DescribeLogDirsResponseDirMetadata struct {
+	ErrorCode KError
+
+	// The absolute log directory path
+	Path   string
+	Topics []DescribeLogDirsResponseTopic
+
+	TotalBytes  int64
+	UsableBytes int64
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder, version int16) error {
+	pe.putKError(r.ErrorCode)
+
+	err := pe.putString(r.Path)
+	if err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Topics)); err != nil {
+		return err
+	}
+	for _, topic := range r.Topics {
+		if err := topic.encode(pe, version); err != nil {
+			return err
+		}
+	}
+
+	if version >= 4 {
+		pe.putInt64(r.TotalBytes)
+		pe.putInt64(r.UsableBytes)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) (err error) {
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	path, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Path = path
+
+	// Decode array of DescribeLogDirsResponseTopic
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]DescribeLogDirsResponseTopic, n)
+	for i := 0; i < n; i++ {
+		t := DescribeLogDirsResponseTopic{}
+
+		if err := t.decode(pd, version); err != nil {
+			return err
+		}
+
+		r.Topics[i] = t
+	}
+
+	if version >= 4 {
+		totalBytes, err := pd.getInt64()
+		if err != nil {
+			return err
+		}
+		r.TotalBytes = totalBytes
+		usableBytes, err := pd.getInt64()
+		if err != nil {
+			return err
+		}
+		r.UsableBytes = usableBytes
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
+type DescribeLogDirsResponseTopic struct {
+	Topic      string
+	Partitions []DescribeLogDirsResponsePartition
+}
+
+func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder, version int16) error {
+	if err := pe.putString(r.Topic); err != nil {
+		return err
+	}
+	if err := pe.putArrayLength(len(r.Partitions)); err != nil {
+		return err
+	}
+	for _, partition := range r.Partitions {
+		if err := partition.encode(pe, version); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Topic = t
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Partitions = make([]DescribeLogDirsResponsePartition, n)
+	for i := 0; i < n; i++ {
+		p := DescribeLogDirsResponsePartition{}
+		if err := p.decode(pd, version); err != nil {
+			return err
+		}
+		r.Partitions[i] = p
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+// DescribeLogDirsResponsePartition describes a partition's log directory
+type DescribeLogDirsResponsePartition struct {
+	PartitionID int32
+
+	// The size of the log segments of the partition in bytes.
+	Size int64
+
+	// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
+	// current replica's LEO (if it is the future log for the partition)
+	OffsetLag int64
+
+	// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
+	// the replica in the future.
+	IsTemporary bool
+}
+
+func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder, version int16) error {
+	isFlexible := version >= 2
+	pe.putInt32(r.PartitionID)
+	pe.putInt64(r.Size)
+	pe.putInt64(r.OffsetLag)
+	pe.putBool(r.IsTemporary)
+	if isFlexible {
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
+	pID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.PartitionID = pID
+
+	size, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.Size = size
+
+	lag, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.OffsetLag = lag
+
+	isTemp, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.IsTemporary = isTemp
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
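A small helper sketch that walks the response structure above to total the reported log size; the package and function names are hypothetical, and only the types come from the file above:

package logdirs

import "github.com/IBM/sarama"

// TotalSize sums the on-disk size of every partition log reported in a set of
// directory results, skipping directories that returned an error.
func TotalSize(dirs []sarama.DescribeLogDirsResponseDirMetadata) int64 {
	var total int64
	for _, dir := range dirs {
		if dir.ErrorCode != sarama.ErrNoError {
			continue
		}
		for _, topic := range dir.Topics {
			for _, p := range topic.Partitions {
				total += p.Size
			}
		}
	}
	return total
}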
diff --git a/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go
new file mode 100644
index 0000000..4de3dcf
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go
@@ -0,0 +1,86 @@
+package sarama
+
+// DescribeUserScramCredentialsRequest is a request to get a list of SCRAM user names
+type DescribeUserScramCredentialsRequest struct {
+	// Only version 0 is currently supported
+	Version int16
+
+	// If this is an empty array, all users will be queried
+	DescribeUsers []DescribeUserScramCredentialsRequestUser
+}
+
+func (r *DescribeUserScramCredentialsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+// DescribeUserScramCredentialsRequestUser is a describe request about a specific user name
+type DescribeUserScramCredentialsRequestUser struct {
+	Name string
+}
+
+func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.DescribeUsers)); err != nil {
+		return err
+	}
+	for _, d := range r.DescribeUsers {
+		if err := pe.putString(d.Name); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		n = 0
+	}
+
+	r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n)
+	for i := 0; i < n; i++ {
+		r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{}
+		if r.DescribeUsers[i].Name, err = pd.getString(); err != nil {
+			return err
+		}
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeUserScramCredentialsRequest) key() int16 {
+	return apiKeyDescribeUserScramCredentials
+}
+
+func (r *DescribeUserScramCredentialsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *DescribeUserScramCredentialsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *DescribeUserScramCredentialsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeUserScramCredentialsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
diff --git a/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go
new file mode 100644
index 0000000..6852e0c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go
@@ -0,0 +1,186 @@
+package sarama
+
+import "time"
+
+type ScramMechanismType int8
+
+const (
+	SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0
+	SCRAM_MECHANISM_SHA_256                           // 1
+	SCRAM_MECHANISM_SHA_512                           // 2
+)
+
+func (s ScramMechanismType) String() string {
+	switch s {
+	case 1:
+		return SASLTypeSCRAMSHA256
+	case 2:
+		return SASLTypeSCRAMSHA512
+	default:
+		return "Unknown"
+	}
+}
+
+type DescribeUserScramCredentialsResponse struct {
+	// Only version 0 is currently supported
+	Version int16
+
+	ThrottleTime time.Duration
+
+	ErrorCode    KError
+	ErrorMessage *string
+
+	Results []*DescribeUserScramCredentialsResult
+}
+
+func (r *DescribeUserScramCredentialsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type DescribeUserScramCredentialsResult struct {
+	User string
+
+	ErrorCode    KError
+	ErrorMessage *string
+
+	CredentialInfos []*UserScramCredentialsResponseInfo
+}
+
+type UserScramCredentialsResponseInfo struct {
+	Mechanism  ScramMechanismType
+	Iterations int32
+}
+
+func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(r.ThrottleTime)
+
+	pe.putKError(r.ErrorCode)
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Results)); err != nil {
+		return err
+	}
+	for _, u := range r.Results {
+		if err := pe.putString(u.User); err != nil {
+			return err
+		}
+		pe.putInt16(int16(u.ErrorCode))
+		if err := pe.putNullableString(u.ErrorMessage); err != nil {
+			return err
+		}
+
+		if err := pe.putArrayLength(len(u.CredentialInfos)); err != nil {
+			return err
+		}
+		for _, c := range u.CredentialInfos {
+			pe.putInt8(int8(c.Mechanism))
+			pe.putInt32(c.Iterations)
+			pe.putEmptyTaggedFieldArray()
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	numUsers, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numUsers > 0 {
+		r.Results = make([]*DescribeUserScramCredentialsResult, numUsers)
+		for i := 0; i < numUsers; i++ {
+			r.Results[i] = &DescribeUserScramCredentialsResult{}
+			if r.Results[i].User, err = pd.getString(); err != nil {
+				return err
+			}
+
+			r.Results[i].ErrorCode, err = pd.getKError()
+			if err != nil {
+				return err
+			}
+			if r.Results[i].ErrorMessage, err = pd.getNullableString(); err != nil {
+				return err
+			}
+
+			numCredentialInfos, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos)
+			for j := 0; j < numCredentialInfos; j++ {
+				r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{}
+				scramMechanism, err := pd.getInt8()
+				if err != nil {
+					return err
+				}
+				r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism)
+				if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil {
+					return err
+				}
+				if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+					return err
+				}
+			}
+
+			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeUserScramCredentialsResponse) key() int16 {
+	return apiKeyDescribeUserScramCredentials
+}
+
+func (r *DescribeUserScramCredentialsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 {
+	return 2
+}
+
+func (r *DescribeUserScramCredentialsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *DescribeUserScramCredentialsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeUserScramCredentialsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
+
+func (r *DescribeUserScramCredentialsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
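A hedged sketch of consuming this response, listing which SCRAM mechanisms are configured per user; the package and function names are hypothetical, and only the types come from the file above:

package scraminfo

import (
	"fmt"

	"github.com/IBM/sarama"
)

// Mechanisms maps each user in the response to its configured SCRAM
// mechanisms, skipping per-user results that carry an error code.
func Mechanisms(resp *sarama.DescribeUserScramCredentialsResponse) map[string][]string {
	out := make(map[string][]string, len(resp.Results))
	for _, res := range resp.Results {
		if res.ErrorCode != sarama.ErrNoError {
			continue
		}
		for _, info := range res.CredentialInfos {
			out[res.User] = append(out[res.User],
				fmt.Sprintf("%s (%d iterations)", info.Mechanism, info.Iterations))
		}
	}
	return out
}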
diff --git a/vendor/github.com/IBM/sarama/dev.yml b/vendor/github.com/IBM/sarama/dev.yml
new file mode 100644
index 0000000..e2acb38
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/dev.yml
@@ -0,0 +1,10 @@
+name: sarama
+
+up:
+  - go:
+      version: '1.17.6'
+
+commands:
+  test:
+    run: make test
+    desc: 'run unit tests'
diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml
new file mode 100644
index 0000000..3d23396
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/docker-compose.yml
@@ -0,0 +1,175 @@
+x-zookeeper-base: &zookeeper-base
+  image: 'docker.io/library/zookeeper:3.7.2'
+  init: true
+  restart: always
+  profiles:
+    - zookeeper
+  environment: &zookeeper-base-env
+    ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888'
+    ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888'
+    ZOO_INIT_LIMIT: '10'
+    ZOO_SYNC_LIMIT: '5'
+    ZOO_MAX_CLIENT_CNXNS: '0'
+    ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok'
+
+x-kafka-base: &kafka-base
+  image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.9.1}'
+  init: true
+  build:
+    context: .
+    dockerfile: Dockerfile.kafka
+    args:
+      KAFKA_VERSION: ${KAFKA_VERSION:-3.9.1}
+      SCALA_VERSION: ${SCALA_VERSION:-2.13}
+  depends_on:
+    - toxiproxy
+  restart: always
+  environment: &kafka-base-env
+    KAFKA_VERSION: ${KAFKA_VERSION:-3.9.1}
+    KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2'
+    KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2'
+    KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2'
+    KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000'
+    KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000'
+    KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+    KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+    KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false'
+    KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+    KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions"
+    KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+    KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT,CONTROLLER:PLAINTEXT'
+    # ZooKeeper-specific
+    KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+    # KRaft-specific
+    KAFKA_CFG_PROCESS_ROLES: 'broker,controller'
+    KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: '1@kafka-1:9093,2@kafka-2:9093,3@kafka-3:9093,4@kafka-4:9093,5@kafka-5:9093'
+    KAFKA_CFG_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+    KAFKA_CFG_CLUSTER_ID: 'cDZEekk4T3hTNGVlNzB3LUtUbkxaQQo='
+
+services:
+  zookeeper-1:
+    <<: *zookeeper-base
+    container_name: 'zookeeper-1'
+    environment:
+      <<: *zookeeper-base-env
+      ZOO_MY_ID: '1'
+
+  zookeeper-2:
+    <<: *zookeeper-base
+    container_name: 'zookeeper-2'
+    environment:
+      <<: *zookeeper-base-env
+      ZOO_MY_ID: '2'
+
+  zookeeper-3:
+    <<: *zookeeper-base
+    container_name: 'zookeeper-3'
+    environment:
+      <<: *zookeeper-base-env
+      ZOO_MY_ID: '3'
+
+  kafka-1:
+    <<: *kafka-base
+    container_name: 'kafka-1'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-1:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '1'
+      KAFKA_CFG_NODE_ID: '1'
+      KAFKA_CFG_BROKER_RACK: '1'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
+
+  kafka-2:
+    <<: *kafka-base
+    container_name: 'kafka-2'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-2:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '2'
+      KAFKA_CFG_NODE_ID: '2'
+      KAFKA_CFG_BROKER_RACK: '2'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
+
+  kafka-3:
+    <<: *kafka-base
+    container_name: 'kafka-3'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-3:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '3'
+      KAFKA_CFG_NODE_ID: '3'
+      KAFKA_CFG_BROKER_RACK: '3'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
+
+  kafka-4:
+    <<: *kafka-base
+    container_name: 'kafka-4'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-4:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '4'
+      KAFKA_CFG_NODE_ID: '4'
+      KAFKA_CFG_BROKER_RACK: '4'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
+
+  kafka-5:
+    <<: *kafka-base
+    container_name: 'kafka-5'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-5:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '5'
+      KAFKA_CFG_NODE_ID: '5'
+      KAFKA_CFG_BROKER_RACK: '5'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
+
+  toxiproxy:
+    container_name: 'toxiproxy'
+    image: 'ghcr.io/shopify/toxiproxy:2.12.0'
+    init: true
+    healthcheck:
+      test: ['CMD', '/toxiproxy-cli', 'l']
+      interval: 15s
+      timeout: 15s
+      retries: 3
+      start_period: 30s
+    ports:
+      # The tests themselves actually start the proxies on these ports
+      - '29091:29091'
+      - '29092:29092'
+      - '29093:29093'
+      - '29094:29094'
+      - '29095:29095'
+
+      # This is the toxiproxy API port
+      - '8474:8474'
diff --git a/vendor/github.com/IBM/sarama/elect_leaders_request.go b/vendor/github.com/IBM/sarama/elect_leaders_request.go
new file mode 100644
index 0000000..01b9dd5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/elect_leaders_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type ElectLeadersRequest struct {
+	Version         int16
+	Type            ElectionType
+	TopicPartitions map[string][]int32
+	TimeoutMs       int32
+}
+
+func (r *ElectLeadersRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ElectLeadersRequest) encode(pe packetEncoder) error {
+	if r.Version > 0 {
+		pe.putInt8(int8(r.Type))
+	}
+
+	if err := pe.putArrayLength(len(r.TopicPartitions)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putInt32(r.TimeoutMs)
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *ElectLeadersRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version > 0 {
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Type = ElectionType(t)
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.TopicPartitions = make(map[string][]int32)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			partitions := make([]int32, partitionCount)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				partitions[j] = partition
+			}
+			r.TopicPartitions[topic] = partitions
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	r.TimeoutMs, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ElectLeadersRequest) key() int16 {
+	return apiKeyElectLeaders
+}
+
+func (r *ElectLeadersRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ElectLeadersRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *ElectLeadersRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ElectLeadersRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ElectLeadersRequest) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *ElectLeadersRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_4_0_0
+	}
+}
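For illustration, a minimal construction of the request above, triggering an unclean election on a few partitions; "events" is a made-up topic name and the broker/Config wiring needed to actually send the request is omitted:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// Version 1+ carries the election type; version 2+ uses the flexible
	// (compact) encoding and, per requiredVersion above, needs Kafka 2.4.0.
	req := &sarama.ElectLeadersRequest{
		Version:   2,
		Type:      sarama.UncleanElection,
		TimeoutMs: 10000,
		TopicPartitions: map[string][]int32{
			"events": {0, 1, 2},
		},
	}
	fmt.Println(req.Type, len(req.TopicPartitions))
}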
diff --git a/vendor/github.com/IBM/sarama/elect_leaders_response.go b/vendor/github.com/IBM/sarama/elect_leaders_response.go
new file mode 100644
index 0000000..90a3fab
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/elect_leaders_response.go
@@ -0,0 +1,161 @@
+package sarama
+
+import "time"
+
+type PartitionResult struct {
+	ErrorCode    KError
+	ErrorMessage *string
+}
+
+func (b *PartitionResult) encode(pe packetEncoder, version int16) error {
+	pe.putKError(b.ErrorCode)
+	if err := pe.putNullableString(b.ErrorMessage); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *PartitionResult) decode(pd packetDecoder, version int16) (err error) {
+	b.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+	b.ErrorMessage, err = pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type ElectLeadersResponse struct {
+	Version                int16
+	ThrottleTimeMs         int32
+	ErrorCode              KError
+	ReplicaElectionResults map[string]map[int32]*PartitionResult
+}
+
+func (r *ElectLeadersResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ElectLeadersResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+
+	if r.Version > 0 {
+		pe.putKError(r.ErrorCode)
+	}
+
+	if err := pe.putArrayLength(len(r.ReplicaElectionResults)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.ReplicaElectionResults {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, result := range partitions {
+			pe.putInt32(partition)
+			if err := result.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *ElectLeadersResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.Version > 0 {
+		r.ErrorCode, err = pd.getKError()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.ReplicaElectionResults = make(map[string]map[int32]*PartitionResult, numTopics)
+	for i := 0; i < numTopics; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numPartitions, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.ReplicaElectionResults[topic] = make(map[int32]*PartitionResult, numPartitions)
+		for j := 0; j < numPartitions; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			result := new(PartitionResult)
+			if err := result.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.ReplicaElectionResults[topic][partition] = result
+		}
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ElectLeadersResponse) key() int16 {
+	return apiKeyElectLeaders
+}
+
+func (r *ElectLeadersResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ElectLeadersResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *ElectLeadersResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ElectLeadersResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ElectLeadersResponse) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *ElectLeadersResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *ElectLeadersResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/election_type.go b/vendor/github.com/IBM/sarama/election_type.go
new file mode 100644
index 0000000..01f3b65
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/election_type.go
@@ -0,0 +1,10 @@
+package sarama
+
+type ElectionType int8
+
+const (
+	// PreferredElection constant type
+	PreferredElection ElectionType = 0
+	// UncleanElection constant type
+	UncleanElection ElectionType = 1
+)
diff --git a/vendor/github.com/IBM/sarama/encoder_decoder.go b/vendor/github.com/IBM/sarama/encoder_decoder.go
new file mode 100644
index 0000000..ef60224
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/encoder_decoder.go
@@ -0,0 +1,134 @@
+package sarama
+
+import (
+	"fmt"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// encoder is the interface that wraps the basic encode method.
+// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+	encode(pe packetEncoder) error
+}
+
+type encoderWithHeader interface {
+	encoder
+	headerVersion() int16
+}
+
+// encode takes an encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+	if e == nil {
+		return nil, nil
+	}
+
+	var prepEnc prepEncoder
+	var realEnc realEncoder
+
+	err := e.encode(prepareFlexibleEncoder(&prepEnc, e))
+	if err != nil {
+		return nil, err
+	}
+
+	if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+		return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+	}
+
+	realEnc.raw = make([]byte, prepEnc.length)
+	realEnc.registry = metricRegistry
+	err = e.encode(prepareFlexibleEncoder(&realEnc, e))
+	if err != nil {
+		return nil, err
+	}
+
+	return realEnc.raw, nil
+}
+
+// decoder is the interface that wraps the basic decode method.
+// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+	decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+	decode(pd packetDecoder, version int16) error
+}
+
+type flexibleVersion interface {
+	isFlexibleVersion(version int16) bool
+	isFlexible() bool
+}
+
+// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder, metricRegistry metrics.Registry) error {
+	if buf == nil {
+		return nil
+	}
+	helper := realDecoder{
+		raw:      buf,
+		registry: metricRegistry,
+	}
+	err := in.decode(&helper)
+	if err != nil {
+		return err
+	}
+
+	if helper.off != len(buf) {
+		return PacketDecodingError{fmt.Sprintf("invalid length: buf=%d decoded=%d %#v", len(buf), helper.off, in)}
+	}
+
+	return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16, metricRegistry metrics.Registry) error {
+	if buf == nil {
+		return nil
+	}
+
+	helper := prepareFlexibleDecoder(&realDecoder{
+		raw:      buf,
+		registry: metricRegistry,
+	}, in, version)
+	err := in.decode(helper, version)
+	if err != nil {
+		return err
+	}
+
+	if remaining := helper.remaining(); remaining != 0 {
+		return PacketDecodingError{
+			Info: fmt.Sprintf("invalid length len=%d remaining=%d", len(buf), remaining),
+		}
+	}
+
+	return nil
+}
+
+func prepareFlexibleDecoder(pd *realDecoder, in versionedDecoder, version int16) packetDecoder {
+	if flexibleDecoder, ok := in.(flexibleVersion); ok && flexibleDecoder.isFlexibleVersion(version) {
+		return &realFlexibleDecoder{pd}
+	}
+	return pd
+}
+
+func prepareFlexibleEncoder(pe packetEncoder, req encoder) packetEncoder {
+	if flexibleEncoder, ok := req.(flexibleVersion); ok && flexibleEncoder.isFlexible() {
+		switch e := pe.(type) {
+		case *prepEncoder:
+			return &prepFlexibleEncoder{e}
+		case *realEncoder:
+			return &realFlexibleEncoder{e}
+		default:
+			return pe
+		}
+	}
+	return pe
+}
+
+func downgradeFlexibleDecoder(pd packetDecoder) packetDecoder {
+	if f, ok := pd.(*realFlexibleDecoder); ok {
+		return f.realDecoder
+	}
+	return pd
+}
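A tiny round-trip sketch of how these helpers fit together; because encode and versionedDecode are unexported it would only compile inside package sarama (for instance in a _test.go file), and the function name is made up:

// roundTripSketch encodes a flexible (v5) DescribeGroupsRequest and decodes it
// back; passing nil skips metrics recording in both directions.
func roundTripSketch() ([]string, error) {
	in := &DescribeGroupsRequest{Version: 5}
	in.AddGroup("my-consumer-group")

	// prepareFlexibleEncoder picks the compact encoder automatically because
	// DescribeGroupsRequest reports itself as flexible from version 5 onwards.
	buf, err := encode(in, nil)
	if err != nil {
		return nil, err
	}

	out := &DescribeGroupsRequest{}
	if err := versionedDecode(buf, out, 5, nil); err != nil {
		return nil, err
	}
	return out.Groups, nil // ["my-consumer-group"]
}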
diff --git a/vendor/github.com/IBM/sarama/end_txn_request.go b/vendor/github.com/IBM/sarama/end_txn_request.go
new file mode 100644
index 0000000..1f0bf8c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/end_txn_request.go
@@ -0,0 +1,70 @@
+package sarama
+
+type EndTxnRequest struct {
+	Version           int16
+	TransactionalID   string
+	ProducerID        int64
+	ProducerEpoch     int16
+	TransactionResult bool
+}
+
+func (a *EndTxnRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *EndTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	pe.putBool(a.TransactionResult)
+
+	return nil
+}
+
+func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.TransactionResult, err = pd.getBool(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *EndTxnRequest) key() int16 {
+	return apiKeyEndTxn
+}
+
+func (a *EndTxnRequest) version() int16 {
+	return a.Version
+}
+
+func (r *EndTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *EndTxnRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *EndTxnRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/end_txn_response.go b/vendor/github.com/IBM/sarama/end_txn_response.go
new file mode 100644
index 0000000..259f18b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/end_txn_response.go
@@ -0,0 +1,65 @@
+package sarama
+
+import (
+	"time"
+)
+
+type EndTxnResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (e *EndTxnResponse) setVersion(v int16) {
+	e.Version = v
+}
+
+func (e *EndTxnResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(e.ThrottleTime)
+	pe.putKError(e.Err)
+	return nil
+}
+
+func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	if e.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	e.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (e *EndTxnResponse) key() int16 {
+	return apiKeyEndTxn
+}
+
+func (e *EndTxnResponse) version() int16 {
+	return e.Version
+}
+
+func (r *EndTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (e *EndTxnResponse) isValidVersion() bool {
+	return e.Version >= 0 && e.Version <= 2
+}
+
+func (e *EndTxnResponse) requiredVersion() KafkaVersion {
+	switch e.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *EndTxnResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh
new file mode 100644
index 0000000..9815d90
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/entrypoint.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -eu
+set -o pipefail
+
+KAFKA_VERSION="${KAFKA_VERSION:-3.9.1}"
+KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}"
+
+if [ ! -d "${KAFKA_HOME}" ]; then
+    echo 'Error: KAFKA_VERSION '$KAFKA_VERSION' not available in this image at '$KAFKA_HOME
+    exit 1
+fi
+
+cd "${KAFKA_HOME}" || exit 1
+
+# discard all empty/commented lines from default config and copy to /tmp
+sed -e '/^#/d' -e '/^$/d' config/server.properties >/tmp/server.properties
+
+echo "########################################################################" >>/tmp/server.properties
+
+# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka
+for var in "${!KAFKA_CFG_@}"; do
+    key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')"
+    sed -e '/^'$key'/d' -i"" /tmp/server.properties
+    value="${!var}"
+    echo "$key=$value" >>/tmp/server.properties
+done
+
+# use KRaft if KAFKA_VERSION is 4.0.0 or newer
+if printf "%s\n4.0.0" "$KAFKA_VERSION" | sort -V | head -n1 | grep -q "^4.0.0$"; then
+  # remove zookeeper-only options from server.properties and setup storage
+  sed -e '/zookeeper.connect/d' \
+      -i /tmp/server.properties
+  bin/kafka-storage.sh format -t "$KAFKA_CFG_CLUSTER_ID" -c /tmp/server.properties --ignore-formatted
+else
+  # remove KRaft-only options from server.properties
+  sed -e '/process.roles/d' \
+      -e '/controller.quorum.voters/d' \
+      -e '/controller.listener.names/d' \
+      -e '/cluster.id/d' \
+      -i /tmp/server.properties
+fi
+
+sort /tmp/server.properties
+
+exec bin/kafka-server-start.sh /tmp/server.properties
diff --git a/vendor/github.com/IBM/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go
new file mode 100644
index 0000000..55af653
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/errors.go
@@ -0,0 +1,450 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to")
+
+// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID.
+var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// ErrControllerNotAvailable is returned when the server didn't return a correct controller id. This may happen if the
+// Kafka server's version is lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
+// the metadata.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
+// ErrUnknownScramMechanism is returned when a user tries to call AlterUserScramCredentials with an unknown SCRAM mechanism
+var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
+
+// ErrReassignPartitions is returned when altering partition assignments for a topic fails
+var ErrReassignPartitions = errors.New("failed to reassign partitions for topic")
+
+// ErrDeleteRecords is the type of error returned when fail to delete the required records
+var ErrDeleteRecords = errors.New("kafka server: failed to delete records")
+
+// ErrCreateACLs is the type of error returned when ACL creation failed
+var ErrCreateACLs = errors.New("kafka server: failed to create one or more ACL rules")
+
+// ErrAddPartitionsToTxn is returned when AddPartitionsToTxn failed multiple times
+var ErrAddPartitionsToTxn = errors.New("transaction manager: failed to send partitions to transaction")
+
+// ErrTxnOffsetCommit is returned when TxnOffsetCommit failed multiple times
+var ErrTxnOffsetCommit = errors.New("transaction manager: failed to send offsets to transaction")
+
+// ErrTransactionNotReady is returned when the transaction status is invalid for the current action.
+var ErrTransactionNotReady = errors.New("transaction manager: transaction is not ready")
+
+// ErrNonTransactedProducer is returned when calling BeginTxn, CommitTxn or AbortTxn on a non-transactional producer.
+var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer")
+
+// ErrTransitionNotAllowed is returned when a txnmgr state transition is not valid.
+var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted")
+
+// ErrCannotTransitionNilError is returned when a transition is attempted with a nil error.
+var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transition with a nil error")
+
+// ErrTxnUnableToParseResponse is returned when the response is nil
+var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response")
+
+// ErrUnknownMessage is returned when the protocol message key is not recognized
+var ErrUnknownMessage = errors.New("kafka: unknown protocol message key")
+
+// MultiErrorFormat specifies the formatter applied to format multierrors.
+//
+// Deprecated: Please use [errors.Join] instead.
+func MultiErrorFormat(es []error) string {
+	if len(es) == 1 {
+		return es[0].Error()
+	}
+
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d errors occurred:\n\t%s\n",
+		len(es), strings.Join(points, "\n\t"))
+}
+
+type sentinelError struct {
+	sentinel error
+	wrapped  error
+}
+
+func (err sentinelError) Error() string {
+	if err.wrapped != nil {
+		return fmt.Sprintf("%s: %v", err.sentinel, err.wrapped)
+	} else {
+		return fmt.Sprintf("%s", err.sentinel)
+	}
+}
+
+func (err sentinelError) Is(target error) bool {
+	return errors.Is(err.sentinel, target) || errors.Is(err.wrapped, target)
+}
+
+func (err sentinelError) Unwrap() error {
+	return err.wrapped
+}
+
+func Wrap(sentinel error, wrapped ...error) sentinelError {
+	return sentinelError{sentinel: sentinel, wrapped: errors.Join(wrapped...)}
+}
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+	Info string
+}
+
+func (err PacketEncodingError) Error() string {
+	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+	Info string
+}
+
+func (err PacketDecodingError) Error() string {
+	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+	return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+	ErrUnknown                            KError = -1 // Errors.UNKNOWN_SERVER_ERROR
+	ErrNoError                            KError = 0  // Errors.NONE
+	ErrOffsetOutOfRange                   KError = 1  // Errors.OFFSET_OUT_OF_RANGE
+	ErrInvalidMessage                     KError = 2  // Errors.CORRUPT_MESSAGE
+	ErrUnknownTopicOrPartition            KError = 3  // Errors.UNKNOWN_TOPIC_OR_PARTITION
+	ErrInvalidMessageSize                 KError = 4  // Errors.INVALID_FETCH_SIZE
+	ErrLeaderNotAvailable                 KError = 5  // Errors.LEADER_NOT_AVAILABLE
+	ErrNotLeaderForPartition              KError = 6  // Errors.NOT_LEADER_OR_FOLLOWER
+	ErrRequestTimedOut                    KError = 7  // Errors.REQUEST_TIMED_OUT
+	ErrBrokerNotAvailable                 KError = 8  // Errors.BROKER_NOT_AVAILABLE
+	ErrReplicaNotAvailable                KError = 9  // Errors.REPLICA_NOT_AVAILABLE
+	ErrMessageSizeTooLarge                KError = 10 // Errors.MESSAGE_TOO_LARGE
+	ErrStaleControllerEpochCode           KError = 11 // Errors.STALE_CONTROLLER_EPOCH
+	ErrOffsetMetadataTooLarge             KError = 12 // Errors.OFFSET_METADATA_TOO_LARGE
+	ErrNetworkException                   KError = 13 // Errors.NETWORK_EXCEPTION
+	ErrOffsetsLoadInProgress              KError = 14 // Errors.COORDINATOR_LOAD_IN_PROGRESS
+	ErrConsumerCoordinatorNotAvailable    KError = 15 // Errors.COORDINATOR_NOT_AVAILABLE
+	ErrNotCoordinatorForConsumer          KError = 16 // Errors.NOT_COORDINATOR
+	ErrInvalidTopic                       KError = 17 // Errors.INVALID_TOPIC_EXCEPTION
+	ErrMessageSetSizeTooLarge             KError = 18 // Errors.RECORD_LIST_TOO_LARGE
+	ErrNotEnoughReplicas                  KError = 19 // Errors.NOT_ENOUGH_REPLICAS
+	ErrNotEnoughReplicasAfterAppend       KError = 20 // Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND
+	ErrInvalidRequiredAcks                KError = 21 // Errors.INVALID_REQUIRED_ACKS
+	ErrIllegalGeneration                  KError = 22 // Errors.ILLEGAL_GENERATION
+	ErrInconsistentGroupProtocol          KError = 23 // Errors.INCONSISTENT_GROUP_PROTOCOL
+	ErrInvalidGroupId                     KError = 24 // Errors.INVALID_GROUP_ID
+	ErrUnknownMemberId                    KError = 25 // Errors.UNKNOWN_MEMBER_ID
+	ErrInvalidSessionTimeout              KError = 26 // Errors.INVALID_SESSION_TIMEOUT
+	ErrRebalanceInProgress                KError = 27 // Errors.REBALANCE_IN_PROGRESS
+	ErrInvalidCommitOffsetSize            KError = 28 // Errors.INVALID_COMMIT_OFFSET_SIZE
+	ErrTopicAuthorizationFailed           KError = 29 // Errors.TOPIC_AUTHORIZATION_FAILED
+	ErrGroupAuthorizationFailed           KError = 30 // Errors.GROUP_AUTHORIZATION_FAILED
+	ErrClusterAuthorizationFailed         KError = 31 // Errors.CLUSTER_AUTHORIZATION_FAILED
+	ErrInvalidTimestamp                   KError = 32 // Errors.INVALID_TIMESTAMP
+	ErrUnsupportedSASLMechanism           KError = 33 // Errors.UNSUPPORTED_SASL_MECHANISM
+	ErrIllegalSASLState                   KError = 34 // Errors.ILLEGAL_SASL_STATE
+	ErrUnsupportedVersion                 KError = 35 // Errors.UNSUPPORTED_VERSION
+	ErrTopicAlreadyExists                 KError = 36 // Errors.TOPIC_ALREADY_EXISTS
+	ErrInvalidPartitions                  KError = 37 // Errors.INVALID_PARTITIONS
+	ErrInvalidReplicationFactor           KError = 38 // Errors.INVALID_REPLICATION_FACTOR
+	ErrInvalidReplicaAssignment           KError = 39 // Errors.INVALID_REPLICA_ASSIGNMENT
+	ErrInvalidConfig                      KError = 40 // Errors.INVALID_CONFIG
+	ErrNotController                      KError = 41 // Errors.NOT_CONTROLLER
+	ErrInvalidRequest                     KError = 42 // Errors.INVALID_REQUEST
+	ErrUnsupportedForMessageFormat        KError = 43 // Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT
+	ErrPolicyViolation                    KError = 44 // Errors.POLICY_VIOLATION
+	ErrOutOfOrderSequenceNumber           KError = 45 // Errors.OUT_OF_ORDER_SEQUENCE_NUMBER
+	ErrDuplicateSequenceNumber            KError = 46 // Errors.DUPLICATE_SEQUENCE_NUMBER
+	ErrInvalidProducerEpoch               KError = 47 // Errors.INVALID_PRODUCER_EPOCH
+	ErrInvalidTxnState                    KError = 48 // Errors.INVALID_TXN_STATE
+	ErrInvalidProducerIDMapping           KError = 49 // Errors.INVALID_PRODUCER_ID_MAPPING
+	ErrInvalidTransactionTimeout          KError = 50 // Errors.INVALID_TRANSACTION_TIMEOUT
+	ErrConcurrentTransactions             KError = 51 // Errors.CONCURRENT_TRANSACTIONS
+	ErrTransactionCoordinatorFenced       KError = 52 // Errors.TRANSACTION_COORDINATOR_FENCED
+	ErrTransactionalIDAuthorizationFailed KError = 53 // Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED
+	ErrSecurityDisabled                   KError = 54 // Errors.SECURITY_DISABLED
+	ErrOperationNotAttempted              KError = 55 // Errors.OPERATION_NOT_ATTEMPTED
+	ErrKafkaStorageError                  KError = 56 // Errors.KAFKA_STORAGE_ERROR
+	ErrLogDirNotFound                     KError = 57 // Errors.LOG_DIR_NOT_FOUND
+	ErrSASLAuthenticationFailed           KError = 58 // Errors.SASL_AUTHENTICATION_FAILED
+	ErrUnknownProducerID                  KError = 59 // Errors.UNKNOWN_PRODUCER_ID
+	ErrReassignmentInProgress             KError = 60 // Errors.REASSIGNMENT_IN_PROGRESS
+	ErrDelegationTokenAuthDisabled        KError = 61 // Errors.DELEGATION_TOKEN_AUTH_DISABLED
+	ErrDelegationTokenNotFound            KError = 62 // Errors.DELEGATION_TOKEN_NOT_FOUND
+	ErrDelegationTokenOwnerMismatch       KError = 63 // Errors.DELEGATION_TOKEN_OWNER_MISMATCH
+	ErrDelegationTokenRequestNotAllowed   KError = 64 // Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED
+	ErrDelegationTokenAuthorizationFailed KError = 65 // Errors.DELEGATION_TOKEN_AUTHORIZATION_FAILED
+	ErrDelegationTokenExpired             KError = 66 // Errors.DELEGATION_TOKEN_EXPIRED
+	ErrInvalidPrincipalType               KError = 67 // Errors.INVALID_PRINCIPAL_TYPE
+	ErrNonEmptyGroup                      KError = 68 // Errors.NON_EMPTY_GROUP
+	ErrGroupIDNotFound                    KError = 69 // Errors.GROUP_ID_NOT_FOUND
+	ErrFetchSessionIDNotFound             KError = 70 // Errors.FETCH_SESSION_ID_NOT_FOUND
+	ErrInvalidFetchSessionEpoch           KError = 71 // Errors.INVALID_FETCH_SESSION_EPOCH
+	ErrListenerNotFound                   KError = 72 // Errors.LISTENER_NOT_FOUND
+	ErrTopicDeletionDisabled              KError = 73 // Errors.TOPIC_DELETION_DISABLED
+	ErrFencedLeaderEpoch                  KError = 74 // Errors.FENCED_LEADER_EPOCH
+	ErrUnknownLeaderEpoch                 KError = 75 // Errors.UNKNOWN_LEADER_EPOCH
+	ErrUnsupportedCompressionType         KError = 76 // Errors.UNSUPPORTED_COMPRESSION_TYPE
+	ErrStaleBrokerEpoch                   KError = 77 // Errors.STALE_BROKER_EPOCH
+	ErrOffsetNotAvailable                 KError = 78 // Errors.OFFSET_NOT_AVAILABLE
+	ErrMemberIdRequired                   KError = 79 // Errors.MEMBER_ID_REQUIRED
+	ErrPreferredLeaderNotAvailable        KError = 80 // Errors.PREFERRED_LEADER_NOT_AVAILABLE
+	ErrGroupMaxSizeReached                KError = 81 // Errors.GROUP_MAX_SIZE_REACHED
+	ErrFencedInstancedId                  KError = 82 // Errors.FENCED_INSTANCE_ID
+	ErrEligibleLeadersNotAvailable        KError = 83 // Errors.ELIGIBLE_LEADERS_NOT_AVAILABLE
+	ErrElectionNotNeeded                  KError = 84 // Errors.ELECTION_NOT_NEEDED
+	ErrNoReassignmentInProgress           KError = 85 // Errors.NO_REASSIGNMENT_IN_PROGRESS
+	ErrGroupSubscribedToTopic             KError = 86 // Errors.GROUP_SUBSCRIBED_TO_TOPIC
+	ErrInvalidRecord                      KError = 87 // Errors.INVALID_RECORD
+	ErrUnstableOffsetCommit               KError = 88 // Errors.UNSTABLE_OFFSET_COMMIT
+	ErrThrottlingQuotaExceeded            KError = 89 // Errors.THROTTLING_QUOTA_EXCEEDED
+	ErrProducerFenced                     KError = 90 // Errors.PRODUCER_FENCED
+)
+
+func (err KError) Error() string {
+	// Error messages stolen/adapted from
+	// https://kafka.apache.org/protocol#protocol_error_codes
+	switch err {
+	case ErrNoError:
+		return "kafka server: Not an error, why are you printing me?"
+	case ErrUnknown:
+		return "kafka server: Unexpected (unknown?) server error"
+	case ErrOffsetOutOfRange:
+		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition"
+	case ErrInvalidMessage:
+		return "kafka server: Message contents does not match its CRC"
+	case ErrUnknownTopicOrPartition:
+		return "kafka server: Request was for a topic or partition that does not exist on this broker"
+	case ErrInvalidMessageSize:
+		return "kafka server: The message has a negative size"
+	case ErrLeaderNotAvailable:
+		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes"
+	case ErrNotLeaderForPartition:
+		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date"
+	case ErrRequestTimedOut:
+		return "kafka server: Request exceeded the user-specified time limit in the request"
+	case ErrBrokerNotAvailable:
+		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+	case ErrReplicaNotAvailable:
+		return "kafka server: Replica information not available, one or more brokers are down"
+	case ErrMessageSizeTooLarge:
+		return "kafka server: Message was too large, server rejected it to avoid allocation error"
+	case ErrStaleControllerEpochCode:
+		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)"
+	case ErrOffsetMetadataTooLarge:
+		return "kafka server: Specified a string larger than the configured maximum for offset metadata"
+	case ErrNetworkException:
+		return "kafka server: The server disconnected before a response was received"
+	case ErrOffsetsLoadInProgress:
+		return "kafka server: The coordinator is still loading offsets and cannot currently process requests"
+	case ErrConsumerCoordinatorNotAvailable:
+		return "kafka server: The coordinator is not available"
+	case ErrNotCoordinatorForConsumer:
+		return "kafka server: Request was for a consumer group that is not coordinated by this broker"
+	case ErrInvalidTopic:
+		return "kafka server: The request attempted to perform an operation on an invalid topic"
+	case ErrMessageSetSizeTooLarge:
+		return "kafka server: The request included message batch larger than the configured segment size on the server"
+	case ErrNotEnoughReplicas:
+		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required"
+	case ErrNotEnoughReplicasAfterAppend:
+		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required"
+	case ErrInvalidRequiredAcks:
+		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)"
+	case ErrIllegalGeneration:
+		return "kafka server: The provided generation id is not the current generation"
+	case ErrInconsistentGroupProtocol:
+		return "kafka server: The provider group protocol type is incompatible with the other members"
+	case ErrInvalidGroupId:
+		return "kafka server: The provided group id was empty"
+	case ErrUnknownMemberId:
+		return "kafka server: The provided member is not known in the current generation"
+	case ErrInvalidSessionTimeout:
+		return "kafka server: The provided session timeout is outside the allowed range"
+	case ErrRebalanceInProgress:
+		return "kafka server: A rebalance for the group is in progress. Please re-join the group"
+	case ErrInvalidCommitOffsetSize:
+		return "kafka server: The provided commit metadata was too large"
+	case ErrTopicAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this topic"
+	case ErrGroupAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this group"
+	case ErrClusterAuthorizationFailed:
+		return "kafka server: The client is not authorized to send this request type"
+	case ErrInvalidTimestamp:
+		return "kafka server: The timestamp of the message is out of acceptable range"
+	case ErrUnsupportedSASLMechanism:
+		return "kafka server: The broker does not support the requested SASL mechanism"
+	case ErrIllegalSASLState:
+		return "kafka server: Request is not valid given the current SASL state"
+	case ErrUnsupportedVersion:
+		return "kafka server: The version of API is not supported"
+	case ErrTopicAlreadyExists:
+		return "kafka server: Topic with this name already exists"
+	case ErrInvalidPartitions:
+		return "kafka server: Number of partitions is invalid"
+	case ErrInvalidReplicationFactor:
+		return "kafka server: Replication-factor is invalid"
+	case ErrInvalidReplicaAssignment:
+		return "kafka server: Replica assignment is invalid"
+	case ErrInvalidConfig:
+		return "kafka server: Configuration is invalid"
+	case ErrNotController:
+		return "kafka server: This is not the correct controller for this cluster"
+	case ErrInvalidRequest:
+		return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details"
+	case ErrUnsupportedForMessageFormat:
+		return "kafka server: The requested operation is not supported by the message format version"
+	case ErrPolicyViolation:
+		return "kafka server: Request parameters do not satisfy the configured policy"
+	case ErrOutOfOrderSequenceNumber:
+		return "kafka server: The broker received an out of order sequence number"
+	case ErrDuplicateSequenceNumber:
+		return "kafka server: The broker received a duplicate sequence number"
+	case ErrInvalidProducerEpoch:
+		return "kafka server: Producer attempted an operation with an old epoch"
+	case ErrInvalidTxnState:
+		return "kafka server: The producer attempted a transactional operation in an invalid state"
+	case ErrInvalidProducerIDMapping:
+		return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id"
+	case ErrInvalidTransactionTimeout:
+		return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)"
+	case ErrConcurrentTransactions:
+		return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing"
+	case ErrTransactionCoordinatorFenced:
+		return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer"
+	case ErrTransactionalIDAuthorizationFailed:
+		return "kafka server: Transactional ID authorization failed"
+	case ErrSecurityDisabled:
+		return "kafka server: Security features are disabled"
+	case ErrOperationNotAttempted:
+		return "kafka server: The broker did not attempt to execute this operation"
+	case ErrKafkaStorageError:
+		return "kafka server: Disk error when trying to access log file on the disk"
+	case ErrLogDirNotFound:
+		return "kafka server: The specified log directory is not found in the broker config"
+	case ErrSASLAuthenticationFailed:
+		return "kafka server: SASL Authentication failed"
+	case ErrUnknownProducerID:
+		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID"
+	case ErrReassignmentInProgress:
+		return "kafka server: A partition reassignment is in progress"
+	case ErrDelegationTokenAuthDisabled:
+		return "kafka server: Delegation Token feature is not enabled"
+	case ErrDelegationTokenNotFound:
+		return "kafka server: Delegation Token is not found on server"
+	case ErrDelegationTokenOwnerMismatch:
+		return "kafka server: Specified Principal is not valid Owner/Renewer"
+	case ErrDelegationTokenRequestNotAllowed:
+		return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels"
+	case ErrDelegationTokenAuthorizationFailed:
+		return "kafka server: Delegation Token authorization failed"
+	case ErrDelegationTokenExpired:
+		return "kafka server: Delegation Token is expired"
+	case ErrInvalidPrincipalType:
+		return "kafka server: Supplied principalType is not supported"
+	case ErrNonEmptyGroup:
+		return "kafka server: The group is not empty"
+	case ErrGroupIDNotFound:
+		return "kafka server: The group id does not exist"
+	case ErrFetchSessionIDNotFound:
+		return "kafka server: The fetch session ID was not found"
+	case ErrInvalidFetchSessionEpoch:
+		return "kafka server: The fetch session epoch is invalid"
+	case ErrListenerNotFound:
+		return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed"
+	case ErrTopicDeletionDisabled:
+		return "kafka server: Topic deletion is disabled"
+	case ErrFencedLeaderEpoch:
+		return "kafka server: The leader epoch in the request is older than the epoch on the broker"
+	case ErrUnknownLeaderEpoch:
+		return "kafka server: The leader epoch in the request is newer than the epoch on the broker"
+	case ErrUnsupportedCompressionType:
+		return "kafka server: The requesting client does not support the compression type of given partition"
+	case ErrStaleBrokerEpoch:
+		return "kafka server: Broker epoch has changed"
+	case ErrOffsetNotAvailable:
+		return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
+	case ErrMemberIdRequired:
+		return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
+	case ErrPreferredLeaderNotAvailable:
+		return "kafka server: The preferred leader was not available"
+	case ErrGroupMaxSizeReached:
+		return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members"
+	case ErrFencedInstancedId:
+		return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id"
+	case ErrEligibleLeadersNotAvailable:
+		return "kafka server: Eligible topic partition leaders are not available"
+	case ErrElectionNotNeeded:
+		return "kafka server: Leader election not needed for topic partition"
+	case ErrNoReassignmentInProgress:
+		return "kafka server: No partition reassignment is in progress"
+	case ErrGroupSubscribedToTopic:
+		return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it"
+	case ErrInvalidRecord:
+		return "kafka server: This record has failed the validation on broker and hence will be rejected"
+	case ErrUnstableOffsetCommit:
+		return "kafka server: There are unstable offsets that need to be cleared"
+	}
+
+	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
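+
+// A minimal usage sketch: callers typically compare broker errors against
+// these sentinels with errors.Is, assuming the error chain unwraps to a
+// KError value:
+//
+//	if errors.Is(err, ErrUnknownTopicOrPartition) {
+//		// refresh metadata or create the topic before retrying
+//	}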
diff --git a/vendor/github.com/IBM/sarama/fetch_request.go b/vendor/github.com/IBM/sarama/fetch_request.go
new file mode 100644
index 0000000..6d072dd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/fetch_request.go
@@ -0,0 +1,340 @@
+package sarama
+
+import "fmt"
+
+type fetchRequestBlock struct {
+	Version int16
+	// currentLeaderEpoch contains the current leader epoch of the partition.
+	currentLeaderEpoch int32
+	// fetchOffset contains the message offset.
+	fetchOffset int64
+	// logStartOffset contains the earliest available offset of the follower
+	// replica.  The field is only used when the request is sent by the
+	// follower.
+	logStartOffset int64
+	// maxBytes contains the maximum bytes to fetch from this partition.  See
+	// KIP-74 for cases where this limit may not be honored.
+	maxBytes int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
+	b.Version = version
+	if b.Version >= 9 {
+		pe.putInt32(b.currentLeaderEpoch)
+	}
+	pe.putInt64(b.fetchOffset)
+	if b.Version >= 5 {
+		pe.putInt64(b.logStartOffset)
+	}
+	pe.putInt32(b.maxBytes)
+	return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Version = version
+	if b.Version >= 9 {
+		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if b.fetchOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if b.Version >= 5 {
+		if b.logStartOffset, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+	if b.maxBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that.  The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ReplicaID contains the broker ID of the follower, or -1 if this request
+	// is from a consumer.
+	// ReplicaID int32
+	// MaxWaitTime contains the maximum time in milliseconds to wait for the response.
+	MaxWaitTime int32
+	// MinBytes contains the minimum bytes to accumulate in the response.
+	MinBytes int32
+	// MaxBytes contains the maximum bytes to fetch.  See KIP-74 for cases
+	// where this limit may not be honored.
+	MaxBytes int32
+	// Isolation controls the visibility of transactional records. Using
+	// READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With
+	// READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED
+	// transactional records are visible. To be more concrete, READ_COMMITTED
+	// returns all data from offsets smaller than the current LSO (last stable
+	// offset), and enables the inclusion of the list of aborted transactions
+	// in the result, which allows consumers to discard ABORTED transactional
+	// records.
+	Isolation IsolationLevel
+	// SessionID contains the fetch session ID.
+	SessionID int32
+	// SessionEpoch contains the epoch of the partition leader as known to the
+	// follower replica or a consumer.
+	SessionEpoch int32
+	// blocks contains the topics to fetch.
+	blocks map[string]map[int32]*fetchRequestBlock
+	// forgotten contains the partitions to remove from an incremental fetch request.
+	forgotten map[string][]int32
+	// RackID contains the rack ID of the consumer making this request.
+	RackID string
+}
+
+func (r *FetchRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+type IsolationLevel int8
+
+const (
+	ReadUncommitted IsolationLevel = iota
+	ReadCommitted
+)
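+
+// A minimal sketch of selecting an isolation level on a client configuration,
+// assuming the usual Config.Consumer.IsolationLevel field:
+//
+//	cfg := NewConfig()
+//	cfg.Version = V0_11_0_0 // READ_COMMITTED requires fetch request v4+
+//	cfg.Consumer.IsolationLevel = ReadCommitted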
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+	metricRegistry := pe.metricRegistry()
+
+	pe.putInt32(-1) // ReplicaID is always -1 for clients
+	pe.putInt32(r.MaxWaitTime)
+	pe.putInt32(r.MinBytes)
+	if r.Version >= 3 {
+		pe.putInt32(r.MaxBytes)
+	}
+	if r.Version >= 4 {
+		pe.putInt8(int8(r.Isolation))
+	}
+	if r.Version >= 7 {
+		pe.putInt32(r.SessionID)
+		pe.putInt32(r.SessionEpoch)
+	}
+	err = pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, blocks := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(blocks))
+		if err != nil {
+			return err
+		}
+		for partition, block := range blocks {
+			pe.putInt32(partition)
+			err = block.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+		getOrRegisterTopicMeter("consumer-fetch-rate", topic, metricRegistry).Mark(1)
+	}
+	if r.Version >= 7 {
+		err = pe.putArrayLength(len(r.forgotten))
+		if err != nil {
+			return err
+		}
+		for topic, partitions := range r.forgotten {
+			err = pe.putString(topic)
+			if err != nil {
+				return err
+			}
+			err = pe.putArrayLength(len(partitions))
+			if err != nil {
+				return err
+			}
+			for _, partition := range partitions {
+				pe.putInt32(partition)
+			}
+		}
+	}
+	if r.Version >= 11 {
+		err = pe.putString(r.RackID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if _, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MinBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		if r.MaxBytes, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if r.Version >= 4 {
+		isolation, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Isolation = IsolationLevel(isolation)
+	}
+	if r.Version >= 7 {
+		r.SessionID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+		r.SessionEpoch, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			fetchBlock := &fetchRequestBlock{}
+			if err = fetchBlock.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = fetchBlock
+		}
+	}
+
+	if r.Version >= 7 {
+		forgottenCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.forgotten = make(map[string][]int32)
+		for i := 0; i < forgottenCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			if partitionCount < 0 {
+				return fmt.Errorf("partitionCount %d is invalid", partitionCount)
+			}
+			r.forgotten[topic] = make([]int32, partitionCount)
+
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				r.forgotten[topic][j] = partition
+			}
+		}
+	}
+
+	if r.Version >= 11 {
+		r.RackID, err = pd.getString()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchRequest) key() int16 {
+	return apiKeyFetch
+}
+
+func (r *FetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *FetchRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *FetchRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 11
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 11:
+		return V2_3_0_0
+	case 9, 10:
+		return V2_1_0_0
+	case 8:
+		return V2_0_0_0
+	case 7:
+		return V1_1_0_0
+	case 6:
+		return V1_0_0_0
+	case 4, 5:
+		return V0_11_0_0
+	case 3:
+		return V0_10_1_0
+	case 2:
+		return V0_10_0_0
+	case 1:
+		return V0_9_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32, leaderEpoch int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	}
+
+	if r.Version >= 7 && r.forgotten == nil {
+		r.forgotten = make(map[string][]int32)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+	}
+
+	tmp := new(fetchRequestBlock)
+	tmp.Version = r.Version
+	tmp.maxBytes = maxBytes
+	tmp.fetchOffset = fetchOffset
+	if r.Version >= 9 {
+		tmp.currentLeaderEpoch = leaderEpoch
+	}
+
+	r.blocks[topic][partitionID] = tmp
+}
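+
+// A minimal sketch of building a fetch request by hand (the consumer normally
+// does this internally); the leader epoch is assumed unknown here, so -1 is
+// passed:
+//
+//	req := &FetchRequest{Version: 11, MaxWaitTime: 500, MinBytes: 1, MaxBytes: 10 << 20}
+//	req.AddBlock("my-topic", 0, 1234, 1<<20, -1)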
diff --git a/vendor/github.com/IBM/sarama/fetch_response.go b/vendor/github.com/IBM/sarama/fetch_response.go
new file mode 100644
index 0000000..95de90e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/fetch_response.go
@@ -0,0 +1,605 @@
+package sarama
+
+import (
+	"errors"
+	"sort"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+const (
+	invalidLeaderEpoch        = -1
+	invalidPreferredReplicaID = -1
+)
+
+type AbortedTransaction struct {
+	// ProducerID contains the producer id associated with the aborted transaction.
+	ProducerID int64
+	// FirstOffset contains the first offset in the aborted transaction.
+	FirstOffset int64
+}
+
+func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if t.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
+	pe.putInt64(t.ProducerID)
+	pe.putInt64(t.FirstOffset)
+
+	return nil
+}
+
+type FetchResponseBlock struct {
+	// Err contains the error code, or 0 if there was no fetch error.
+	Err KError
+	// HighWaterMarkOffset contains the current high water mark.
+	HighWaterMarkOffset int64
+	// LastStableOffset contains the last stable offset (or LSO) of the
+	// partition. This is the last offset such that the state of all
+	// transactional records prior to this offset have been decided (ABORTED or
+	// COMMITTED)
+	LastStableOffset int64
+	// LogStartOffset contains the current log start offset.
+	LogStartOffset int64
+	// AbortedTransactions contains the aborted transactions.
+	AbortedTransactions []*AbortedTransaction
+	// PreferredReadReplica contains the preferred read replica for the
+	// consumer to use on its next fetch request
+	PreferredReadReplica int32
+	// RecordsSet contains the record data.
+	RecordsSet []*Records
+
+	Partial bool
+	Records *Records // deprecated: use FetchResponseBlock.RecordsSet
+
+	// recordsNextOffset contains the next consecutive offset following this response block.
+	// This field is computed locally and is not part of the server's binary response.
+	recordsNextOffset *int64
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	metricRegistry := pd.metricRegistry()
+	var sizeMetric metrics.Histogram
+	if metricRegistry != nil {
+		sizeMetric = getOrRegisterHistogram("consumer-fetch-response-size", metricRegistry)
+	}
+
+	b.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	b.HighWaterMarkOffset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 4 {
+		b.LastStableOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		if version >= 5 {
+			b.LogStartOffset, err = pd.getInt64()
+			if err != nil {
+				return err
+			}
+		}
+
+		numTransact, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if numTransact >= 0 {
+			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
+		}
+
+		for i := 0; i < numTransact; i++ {
+			transact := new(AbortedTransaction)
+			if err = transact.decode(pd); err != nil {
+				return err
+			}
+			b.AbortedTransactions[i] = transact
+		}
+	}
+
+	if version >= 11 {
+		b.PreferredReadReplica, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	} else {
+		b.PreferredReadReplica = -1
+	}
+
+	recordsSize, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if sizeMetric != nil {
+		sizeMetric.Update(int64(recordsSize))
+	}
+
+	recordsDecoder, err := pd.getSubset(int(recordsSize))
+	if err != nil {
+		return err
+	}
+
+	b.RecordsSet = []*Records{}
+
+	for recordsDecoder.remaining() > 0 {
+		records := &Records{}
+		if err := records.decode(recordsDecoder); err != nil {
+			// If we have at least one decoded records, this is not an error
+			if errors.Is(err, ErrInsufficientData) {
+				if len(b.RecordsSet) == 0 {
+					b.Partial = true
+				}
+				break
+			}
+			return err
+		}
+
+		b.recordsNextOffset, err = records.nextOffset()
+		if err != nil {
+			return err
+		}
+
+		partial, err := records.isPartial()
+		if err != nil {
+			return err
+		}
+
+		n, err := records.numRecords()
+		if err != nil {
+			return err
+		}
+
+		if n > 0 || (partial && len(b.RecordsSet) == 0) {
+			b.RecordsSet = append(b.RecordsSet, records)
+
+			if b.Records == nil {
+				b.Records = records
+			}
+		}
+
+		overflow, err := records.isOverflow()
+		if err != nil {
+			return err
+		}
+
+		if partial || overflow {
+			break
+		}
+	}
+
+	return nil
+}
+
+func (b *FetchResponseBlock) numRecords() (int, error) {
+	sum := 0
+
+	for _, records := range b.RecordsSet {
+		count, err := records.numRecords()
+		if err != nil {
+			return 0, err
+		}
+
+		sum += count
+	}
+
+	return sum, nil
+}
+
+func (b *FetchResponseBlock) isPartial() (bool, error) {
+	if b.Partial {
+		return true, nil
+	}
+
+	if len(b.RecordsSet) == 1 {
+		return b.RecordsSet[0].isPartial()
+	}
+
+	return false, nil
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putKError(b.Err)
+
+	pe.putInt64(b.HighWaterMarkOffset)
+
+	if version >= 4 {
+		pe.putInt64(b.LastStableOffset)
+
+		if version >= 5 {
+			pe.putInt64(b.LogStartOffset)
+		}
+
+		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
+			return err
+		}
+		for _, transact := range b.AbortedTransactions {
+			if err = transact.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	if version >= 11 {
+		pe.putInt32(b.PreferredReadReplica)
+	}
+
+	pe.push(&lengthField{})
+	for _, records := range b.RecordsSet {
+		err = records.encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return pe.pop()
+}
+
+func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
+	// There is no documented guarantee that `fetchResponse.AbortedTransactions` is ordered,
+	// and the Java implementation uses a PriorityQueue based on `FirstOffset`, so we order it ourselves.
+	at := b.AbortedTransactions
+	sort.Slice(
+		at,
+		func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
+	)
+	return at
+}
+
+type FetchResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTime contains the duration in milliseconds for which the request
+	// was throttled due to a quota violation, or zero if the request did not
+	// violate any quota.
+	ThrottleTime time.Duration
+	// ErrorCode contains the top level response error code.
+	ErrorCode int16
+	// SessionID contains the fetch session ID, or 0 if this is not part of a fetch session.
+	SessionID int32
+	// Blocks contains the response topics.
+	Blocks map[string]map[int32]*FetchResponseBlock
+
+	LogAppendTime bool
+	Timestamp     time.Time
+}
+
+func (r *FetchResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 7 {
+		r.ErrorCode, err = pd.getInt16()
+		if err != nil {
+			return err
+		}
+		r.SessionID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(FetchResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 1 {
+		pe.putDurationMs(r.ThrottleTime)
+	}
+
+	if r.Version >= 7 {
+		pe.putInt16(r.ErrorCode)
+		pe.putInt32(r.SessionID)
+	}
+
+	err = pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+
+		for id, block := range partitions {
+			pe.putInt32(id)
+			err = block.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *FetchResponse) key() int16 {
+	return apiKeyFetch
+}
+
+func (r *FetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *FetchResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *FetchResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 11
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 11:
+		return V2_3_0_0
+	case 9, 10:
+		return V2_1_0_0
+	case 8:
+		return V2_0_0_0
+	case 7:
+		return V1_1_0_0
+	case 6:
+		return V1_0_0_0
+	case 4, 5:
+		return V0_11_0_0
+	case 3:
+		return V0_10_1_0
+	case 2:
+		return V0_10_0_0
+	case 1:
+		return V0_9_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *FetchResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+	frb.Err = err
+}
+
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+
+	return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+	var kb []byte
+	var vb []byte
+	if key != nil {
+		kb, _ = key.Encode()
+	}
+	if value != nil {
+		vb, _ = value.Encode()
+	}
+
+	return kb, vb
+}
+
+func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	if r.LogAppendTime {
+		timestamp = r.Timestamp
+	}
+	msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
+	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+	if len(frb.RecordsSet) == 0 {
+		records := newLegacyRecords(&MessageSet{})
+		frb.RecordsSet = []*Records{&records}
+	}
+	set := frb.RecordsSet[0].MsgSet
+	set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+}
+
+// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but
+// instead of appending one record to an existing batch, it appends a new batch
+// containing one record to the fetch response. Since transactions are handled
+// at the batch level (the whole batch is either committed or aborted), use
+// this to test transactions.
+func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+
+	records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: isTransactional,
+	}
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+	records.RecordBatch = batch
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+
+	// batch
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: true,
+		Control:         true,
+	}
+
+	// records
+	records := newDefaultRecords(nil)
+	records.RecordBatch = batch
+
+	// record
+	crAbort := ControlRecord{
+		Version: 0,
+		Type:    recordType,
+	}
+	crKey := &realEncoder{raw: make([]byte, 4)}
+	crValue := &realEncoder{raw: make([]byte, 6)}
+	crAbort.encode(crKey, crValue)
+	rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+	r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
+}
+
+func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
+	r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
+}
+
+func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
+	r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
+}
+
+func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
+	// define controlRecord key and value
+	r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
+}
+
+func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
+	frb := r.getOrCreateBlock(topic, partition)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	batch.LastOffsetDelta = offset
+}
+
+func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	frb.LastStableOffset = offset
+}
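+
+// A minimal sketch of fabricating broker responses with these helpers, as is
+// typically done in tests (mockBroker is assumed to be a test double such as
+// the package's MockBroker):
+//
+//	res := &FetchResponse{Version: 11}
+//	res.AddMessage("my-topic", 0, nil, StringEncoder("hello"), 42)
+//	res.AddError("my-topic", 1, ErrNotLeaderForPartition)
+//	mockBroker.Returns(res)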
diff --git a/vendor/github.com/IBM/sarama/find_coordinator_request.go b/vendor/github.com/IBM/sarama/find_coordinator_request.go
new file mode 100644
index 0000000..04ffc6f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/find_coordinator_request.go
@@ -0,0 +1,75 @@
+package sarama
+
+type CoordinatorType int8
+
+const (
+	CoordinatorGroup CoordinatorType = iota
+	CoordinatorTransaction
+)
+
+type FindCoordinatorRequest struct {
+	Version         int16
+	CoordinatorKey  string
+	CoordinatorType CoordinatorType
+}
+
+func (f *FindCoordinatorRequest) setVersion(v int16) {
+	f.Version = v
+}
+
+func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(f.CoordinatorKey); err != nil {
+		return err
+	}
+
+	if f.Version >= 1 {
+		pe.putInt8(int8(f.CoordinatorType))
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
+	if f.CoordinatorKey, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		f.Version = version
+		coordinatorType, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		f.CoordinatorType = CoordinatorType(coordinatorType)
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) key() int16 {
+	return apiKeyFindCoordinator
+}
+
+func (f *FindCoordinatorRequest) version() int16 {
+	return f.Version
+}
+
+func (r *FindCoordinatorRequest) headerVersion() int16 {
+	return 1
+}
+
+func (f *FindCoordinatorRequest) isValidVersion() bool {
+	return f.Version >= 0 && f.Version <= 2
+}
+
+func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/find_coordinator_response.go b/vendor/github.com/IBM/sarama/find_coordinator_response.go
new file mode 100644
index 0000000..1fb8a28
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/find_coordinator_response.go
@@ -0,0 +1,107 @@
+package sarama
+
+import (
+	"time"
+)
+
+var NoNode = &Broker{id: -1, addr: ":-1"}
+
+type FindCoordinatorResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	Coordinator  *Broker
+}
+
+func (f *FindCoordinatorResponse) setVersion(v int16) {
+	f.Version = v
+}
+
+func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		f.Version = version
+
+		if f.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	f.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		if f.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	coordinator := new(Broker)
+	// The version is hardcoded to 0, as version 1 of Broker.decode includes
+	// the rack field, which is not present in the FindCoordinatorResponse.
+	if err := coordinator.decode(pd, 0); err != nil {
+		return err
+	}
+	if coordinator.addr == ":0" {
+		return nil
+	}
+	f.Coordinator = coordinator
+
+	return nil
+}
+
+func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
+	if f.Version >= 1 {
+		pe.putDurationMs(f.ThrottleTime)
+	}
+
+	pe.putKError(f.Err)
+
+	if f.Version >= 1 {
+		if err := pe.putNullableString(f.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	coordinator := f.Coordinator
+	if coordinator == nil {
+		coordinator = NoNode
+	}
+	if err := coordinator.encode(pe, 0); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (f *FindCoordinatorResponse) key() int16 {
+	return apiKeyFindCoordinator
+}
+
+func (f *FindCoordinatorResponse) version() int16 {
+	return f.Version
+}
+
+func (r *FindCoordinatorResponse) headerVersion() int16 {
+	return 0
+}
+
+func (f *FindCoordinatorResponse) isValidVersion() bool {
+	return f.Version >= 0 && f.Version <= 2
+}
+
+func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
+
+func (r *FindCoordinatorResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/gssapi_kerberos.go b/vendor/github.com/IBM/sarama/gssapi_kerberos.go
new file mode 100644
index 0000000..93ea921
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/gssapi_kerberos.go
@@ -0,0 +1,321 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"strings"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/gssapi"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+const (
+	TOK_ID_KRB_AP_REQ   = 256
+	GSS_API_GENERIC_TAG = 0x60
+	KRB5_USER_AUTH      = 1
+	KRB5_KEYTAB_AUTH    = 2
+	KRB5_CCACHE_AUTH    = 3
+	GSS_API_INITIAL     = 1
+	GSS_API_VERIFY      = 2
+	GSS_API_FINISH      = 3
+)
+
+type GSSAPIConfig struct {
+	AuthType           int
+	KeyTabPath         string
+	CCachePath         string
+	KerberosConfigPath string
+	ServiceName        string
+	Username           string
+	Password           string
+	Realm              string
+	DisablePAFXFAST    bool
+	BuildSpn           BuildSpnFunc
+}
+
+type GSSAPIKerberosAuth struct {
+	Config                *GSSAPIConfig
+	ticket                messages.Ticket
+	encKey                types.EncryptionKey
+	NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
+	step                  int
+}
+
+type KerberosClient interface {
+	Login() error
+	GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
+	Domain() string
+	CName() types.PrincipalName
+	Destroy()
+}
+
+type BuildSpnFunc func(serviceName, host string) string
+
+// writePackage prepends the payload length in big-endian order and sends the framed payload to Kafka.
+func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
+	length := uint64(len(payload))
+	size := length + 4 // 4 byte length header + payload
+	if size > math.MaxInt32 {
+		return 0, errors.New("payload too large, will overflow int32")
+	}
+	finalPackage := make([]byte, size)
+	copy(finalPackage[4:], payload)
+	binary.BigEndian.PutUint32(finalPackage, uint32(length))
+	bytes, err := broker.conn.Write(finalPackage)
+	if err != nil {
+		return bytes, err
+	}
+	return bytes, nil
+}
+
+// readPackage reads payload length (4 bytes) and then reads the payload into []byte
+func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
+	bytesRead := 0
+	lengthInBytes := make([]byte, 4)
+	bytes, err := io.ReadFull(broker.conn, lengthInBytes)
+	if err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += bytes
+	payloadLength := binary.BigEndian.Uint32(lengthInBytes)
+	payloadBytes := make([]byte, payloadLength)         // buffer for the payload
+	bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes
+	if err != nil {
+		return payloadBytes, bytesRead, err
+	}
+	bytesRead += bytes
+	return payloadBytes, bytesRead, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
+	a := make([]byte, 24)
+	flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
+	binary.LittleEndian.PutUint32(a[:4], 16)
+	for _, i := range flags {
+		f := binary.LittleEndian.Uint32(a[20:24])
+		f |= uint32(i)
+		binary.LittleEndian.PutUint32(a[20:24], f)
+	}
+	return a
+}
+
+// Construct Kerberos AP_REQ package, conforming to RFC-4120
+// https://tools.ietf.org/html/rfc4120#page-84
+func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
+	domain string,
+	cname types.PrincipalName,
+	ticket messages.Ticket,
+	sessionKey types.EncryptionKey,
+) ([]byte, error) {
+	auth, err := types.NewAuthenticator(domain, cname)
+	if err != nil {
+		return nil, err
+	}
+	auth.Cksum = types.Checksum{
+		CksumType: chksumtype.GSSAPI,
+		Checksum:  krbAuth.newAuthenticatorChecksum(),
+	}
+	APReq, err := messages.NewAPReq(
+		ticket,
+		sessionKey,
+		auth,
+	)
+	if err != nil {
+		return nil, err
+	}
+	aprBytes := make([]byte, 2)
+	binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
+	tb, err := APReq.Marshal()
+	if err != nil {
+		return nil, err
+	}
+	aprBytes = append(aprBytes, tb...)
+	return aprBytes, nil
+}
+
+// Append the GSS-API header to the payload, conforming to RFC-2743
+// Section 3.1, Mechanism-Independent Token Format
+//
+// https://tools.ietf.org/html/rfc2743#page-81
+//
+// GSSAPIHeader + <specific mechanism payload>
+func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
+	oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID())
+	if err != nil {
+		return nil, err
+	}
+	tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
+	GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
+	GSSHeader = append(GSSHeader, oidBytes...)
+	GSSPackage := append(GSSHeader, payload...)
+	return GSSPackage, nil
+}
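+
+// For reference, the resulting token layout (per RFC 2743 section 3.1) is roughly:
+//
+//	0x60 <DER length> <KRB5 OID> <TOK_ID_KRB_AP_REQ> <AP-REQ bytes>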
+
+func (krbAuth *GSSAPIKerberosAuth) initSecContext(
+	client KerberosClient,
+	bytes []byte,
+) ([]byte, error) {
+	switch krbAuth.step {
+	case GSS_API_INITIAL:
+		aprBytes, err := krbAuth.createKrb5Token(
+			client.Domain(),
+			client.CName(),
+			krbAuth.ticket,
+			krbAuth.encKey)
+		if err != nil {
+			return nil, err
+		}
+		krbAuth.step = GSS_API_VERIFY
+		return krbAuth.appendGSSAPIHeader(aprBytes)
+	case GSS_API_VERIFY:
+		wrapTokenReq := gssapi.WrapToken{}
+		if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
+			return nil, err
+		}
+		// Validate response.
+		isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
+		if !isValid {
+			return nil, err
+		}
+
+		wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
+		if err != nil {
+			return nil, err
+		}
+		krbAuth.step = GSS_API_FINISH
+		return wrapTokenResponse.Marshal()
+	}
+	return nil, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) spn(broker *Broker) string {
+	host, _, _ := net.SplitHostPort(broker.addr)
+	var spn string
+	if krbAuth.Config.BuildSpn != nil {
+		spn = krbAuth.Config.BuildSpn(broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+	} else {
+		spn = fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+	}
+	return spn
+}
+
+// Login uses the given KerberosClient to log in and obtain a service ticket for the given SPN.
+func (krbAuth *GSSAPIKerberosAuth) Login(
+	client KerberosClient,
+	spn string,
+) (*messages.Ticket, error) {
+	if err := client.Login(); err != nil {
+		Logger.Printf("Kerberos client login error: %s", err)
+		return nil, err
+	}
+
+	ticket, encKey, err := client.GetServiceTicket(spn)
+	if err != nil {
+		Logger.Printf("Kerberos service ticket error for %s: %s", spn, err)
+		return nil, err
+	}
+	krbAuth.ticket = ticket
+	krbAuth.encKey = encKey
+	krbAuth.step = GSS_API_INITIAL
+
+	return &ticket, nil
+}
+
+// Authorize performs the Kerberos authentication handshake with the broker.
+func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
+	client, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+	if err != nil {
+		Logger.Printf("Kerberos client initialization error: %s", err)
+		return err
+	}
+	defer client.Destroy()
+
+	ticket, err := krbAuth.Login(client, krbAuth.spn(broker))
+	if err != nil {
+		return err
+	}
+
+	principal := strings.Join(ticket.SName.NameString, "/") + "@" + ticket.Realm
+	var receivedBytes []byte
+
+	for {
+		packBytes, err := krbAuth.initSecContext(client, receivedBytes)
+		if err != nil {
+			Logger.Printf("Kerberos init error as %s: %s", principal, err)
+			return err
+		}
+
+		requestTime := time.Now()
+		bytesWritten, err := krbAuth.writePackage(broker, packBytes)
+		if err != nil {
+			Logger.Printf("Kerberos write error as %s: %s", principal, err)
+			return err
+		}
+		broker.updateOutgoingCommunicationMetrics(bytesWritten)
+
+		switch krbAuth.step {
+		case GSS_API_VERIFY:
+			var bytesRead int
+			receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
+			requestLatency := time.Since(requestTime)
+			broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
+			if err != nil {
+				Logger.Printf("Kerberos read error as %s: %s", principal, err)
+				return err
+			}
+		case GSS_API_FINISH:
+			return nil
+		}
+	}
+}
+
+// AuthorizeV2 performs the SASL v2 GSSAPI authentication with the Kafka broker.
+func (krbAuth *GSSAPIKerberosAuth) AuthorizeV2(
+	broker *Broker,
+	authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error),
+) error {
+	client, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+	if err != nil {
+		Logger.Printf("Kerberos client initialization error: %s", err)
+		return err
+	}
+	defer client.Destroy()
+
+	ticket, err := krbAuth.Login(client, krbAuth.spn(broker))
+	if err != nil {
+		return err
+	}
+
+	principal := strings.Join(ticket.SName.NameString, "/") + "@" + ticket.Realm
+	var receivedBytes []byte
+
+	for {
+		token, err := krbAuth.initSecContext(client, receivedBytes)
+		if err != nil {
+			Logger.Printf("SASL Kerberos init error as %s: %s", principal, err)
+			return err
+		}
+
+		authResponse, err := authSendReceiver(token)
+		if err != nil {
+			Logger.Printf("SASL Kerberos authenticate error as %s: %s", principal, err)
+			return err
+		}
+
+		receivedBytes = authResponse.SaslAuthBytes
+
+		if krbAuth.step == GSS_API_FINISH {
+			return nil
+		}
+	}
+}
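+
+// A minimal sketch of enabling Kerberos/GSSAPI on a client configuration,
+// assuming the usual Net.SASL fields and the SASLTypeGSSAPI mechanism constant:
+//
+//	cfg := NewConfig()
+//	cfg.Net.SASL.Enable = true
+//	cfg.Net.SASL.Mechanism = SASLTypeGSSAPI
+//	cfg.Net.SASL.GSSAPI = GSSAPIConfig{
+//		AuthType:           KRB5_KEYTAB_AUTH,
+//		KeyTabPath:         "/etc/security/kafka.keytab",
+//		KerberosConfigPath: "/etc/krb5.conf",
+//		ServiceName:        "kafka",
+//		Username:           "client",
+//		Realm:              "EXAMPLE.COM",
+//	}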
diff --git a/vendor/github.com/IBM/sarama/heartbeat_request.go b/vendor/github.com/IBM/sarama/heartbeat_request.go
new file mode 100644
index 0000000..21b9971
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/heartbeat_request.go
@@ -0,0 +1,99 @@
+package sarama
+
+type HeartbeatRequest struct {
+	Version         int16
+	GroupId         string
+	GenerationId    int32
+	MemberId        string
+	GroupInstanceId *string
+}
+
+func (r *HeartbeatRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.Version >= 3 {
+		if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *HeartbeatRequest) key() int16 {
+	return apiKeyHeartbeat
+}
+
+func (r *HeartbeatRequest) version() int16 {
+	return r.Version
+}
+
+func (r *HeartbeatRequest) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 2
+	}
+	return 1
+}
+
+func (r *HeartbeatRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *HeartbeatRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *HeartbeatRequest) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_3_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/heartbeat_response.go b/vendor/github.com/IBM/sarama/heartbeat_response.go
new file mode 100644
index 0000000..d753957
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/heartbeat_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+import "time"
+
+type HeartbeatResponse struct {
+	Version      int16
+	ThrottleTime int32
+	Err          KError
+}
+
+func (r *HeartbeatResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putKError(r.Err)
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+	var err error
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *HeartbeatResponse) key() int16 {
+	return apiKeyHeartbeat
+}
+
+func (r *HeartbeatResponse) version() int16 {
+	return r.Version
+}
+
+func (r *HeartbeatResponse) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 1
+	}
+	return 0
+}
+
+func (r *HeartbeatResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *HeartbeatResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *HeartbeatResponse) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *HeartbeatResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go
new file mode 100644
index 0000000..e048684
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go
@@ -0,0 +1,205 @@
+package sarama
+
+type IncrementalAlterConfigsOperation int8
+
+const (
+	IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota
+	IncrementalAlterConfigsOperationDelete
+	IncrementalAlterConfigsOperationAppend
+	IncrementalAlterConfigsOperationSubtract
+)
+
+// IncrementalAlterConfigsRequest is an incremental alter config request type
+type IncrementalAlterConfigsRequest struct {
+	Version      int16
+	Resources    []*IncrementalAlterConfigsResource
+	ValidateOnly bool
+}
+
+func (a *IncrementalAlterConfigsRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+type IncrementalAlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]IncrementalAlterConfigsEntry
+}
+
+type IncrementalAlterConfigsEntry struct {
+	Operation IncrementalAlterConfigsOperation
+	Value     *string
+}
+
+func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range a.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(a.ValidateOnly)
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount)
+	for i := range a.Resources {
+		r := &IncrementalAlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		a.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	a.ValidateOnly = validateOnly
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
+
+	if err := pe.putString(a.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+		return err
+	}
+
+	for name, e := range a.ConfigEntries {
+		if err := pe.putString(name); err != nil {
+			return err
+		}
+
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n)
+		for i := 0; i < n; i++ {
+			name, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			var v IncrementalAlterConfigsEntry
+
+			if err := v.decode(pd, version); err != nil {
+				return err
+			}
+
+			a.ConfigEntries[name] = v
+		}
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Operation))
+
+	if err := pe.putNullableString(a.Value); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = IncrementalAlterConfigsOperation(t)
+
+	s, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+
+	a.Value = s
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *IncrementalAlterConfigsRequest) key() int16 {
+	return apiKeyIncrementalAlterConfigs
+}
+
+func (a *IncrementalAlterConfigsRequest) version() int16 {
+	return a.Version
+}
+
+func (a *IncrementalAlterConfigsRequest) headerVersion() int16 {
+	if a.Version >= 1 {
+		return 2
+	}
+	return 1
+}
+
+func (a *IncrementalAlterConfigsRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *IncrementalAlterConfigsRequest) isFlexible() bool {
+	return a.isFlexibleVersion(a.Version)
+}
+
+func (a *IncrementalAlterConfigsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 1
+}
+
+func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 1:
+		return V2_4_0_0
+	default:
+		return V2_3_0_0
+	}
+}
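+
+// Editor's note: the function below is an illustrative sketch added for this
+// review and is not part of the upstream sarama source. It shows how a caller
+// could assemble a request that incrementally sets a single topic config; the
+// topic name and config key are placeholders, and TopicResource is the
+// ConfigResourceType constant defined elsewhere in this package.
+func exampleIncrementalAlterConfigsRequest() *IncrementalAlterConfigsRequest {
+	retention := "604800000"
+	return &IncrementalAlterConfigsRequest{
+		Version: 1,
+		Resources: []*IncrementalAlterConfigsResource{{
+			Type: TopicResource,
+			Name: "example-topic",
+			ConfigEntries: map[string]IncrementalAlterConfigsEntry{
+				"retention.ms": {
+					Operation: IncrementalAlterConfigsOperationSet,
+					Value:     &retention,
+				},
+			},
+		}},
+	}
+}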
diff --git a/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go
new file mode 100644
index 0000000..9333785
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go
@@ -0,0 +1,94 @@
+package sarama
+
+import "time"
+
+// IncrementalAlterConfigsResponse is a response type for incremental alter config
+type IncrementalAlterConfigsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+func (a *IncrementalAlterConfigsResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		if err := a.Resources[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *IncrementalAlterConfigsResponse) key() int16 {
+	return apiKeyIncrementalAlterConfigs
+}
+
+func (a *IncrementalAlterConfigsResponse) version() int16 {
+	return a.Version
+}
+
+func (a *IncrementalAlterConfigsResponse) headerVersion() int16 {
+	if a.Version >= 1 {
+		return 1
+	}
+	return 0
+}
+
+func (a *IncrementalAlterConfigsResponse) isFlexible() bool {
+	return a.isFlexibleVersion(a.Version)
+}
+
+func (a *IncrementalAlterConfigsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 1
+}
+
+func (a *IncrementalAlterConfigsResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 1:
+		return V2_4_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *IncrementalAlterConfigsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/init_producer_id_request.go b/vendor/github.com/IBM/sarama/init_producer_id_request.go
new file mode 100644
index 0000000..a5d6d26
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/init_producer_id_request.go
@@ -0,0 +1,99 @@
+package sarama
+
+import "time"
+
+type InitProducerIDRequest struct {
+	Version            int16
+	TransactionalID    *string
+	TransactionTimeout time.Duration
+	ProducerID         int64
+	ProducerEpoch      int16
+}
+
+func (i *InitProducerIDRequest) setVersion(v int16) {
+	i.Version = v
+}
+
+func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
+	if err := pe.putNullableString(i.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
+	if i.Version >= 3 {
+		pe.putInt64(i.ProducerID)
+		pe.putInt16(i.ProducerEpoch)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
+	i.Version = version
+	if i.TransactionalID, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
+	if i.Version >= 3 {
+		if i.ProducerID, err = pd.getInt64(); err != nil {
+			return err
+		}
+
+		if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (i *InitProducerIDRequest) key() int16 {
+	return apiKeyInitProducerId
+}
+
+func (i *InitProducerIDRequest) version() int16 {
+	return i.Version
+}
+
+func (i *InitProducerIDRequest) headerVersion() int16 {
+	if i.Version >= 2 {
+		return 2
+	}
+
+	return 1
+}
+
+func (i *InitProducerIDRequest) isValidVersion() bool {
+	return i.Version >= 0 && i.Version <= 4
+}
+
+func (i *InitProducerIDRequest) isFlexible() bool {
+	return i.isFlexibleVersion(i.Version)
+}
+
+func (i *InitProducerIDRequest) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
+	switch i.Version {
+	case 4:
+		return V2_7_0_0
+	case 3:
+		return V2_5_0_0
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_7_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/init_producer_id_response.go b/vendor/github.com/IBM/sarama/init_producer_id_response.go
new file mode 100644
index 0000000..0e84c43
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/init_producer_id_response.go
@@ -0,0 +1,93 @@
+package sarama
+
+import "time"
+
+type InitProducerIDResponse struct {
+	ThrottleTime  time.Duration
+	Err           KError
+	Version       int16
+	ProducerID    int64
+	ProducerEpoch int16
+}
+
+func (i *InitProducerIDResponse) setVersion(v int16) {
+	i.Version = v
+}
+
+func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(i.ThrottleTime)
+	pe.putKError(i.Err)
+	pe.putInt64(i.ProducerID)
+	pe.putInt16(i.ProducerEpoch)
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
+	i.Version = version
+	if i.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	i.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if i.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (i *InitProducerIDResponse) key() int16 {
+	return apiKeyInitProducerId
+}
+
+func (i *InitProducerIDResponse) version() int16 {
+	return i.Version
+}
+
+func (i *InitProducerIDResponse) headerVersion() int16 {
+	if i.Version >= 2 {
+		return 1
+	}
+	return 0
+}
+
+func (i *InitProducerIDResponse) isValidVersion() bool {
+	return i.Version >= 0 && i.Version <= 4
+}
+
+func (i *InitProducerIDResponse) isFlexible() bool {
+	return i.isFlexibleVersion(i.Version)
+}
+
+func (i *InitProducerIDResponse) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
+	switch i.Version {
+	case 4:
+		return V2_7_0_0
+	case 3:
+		return V2_5_0_0
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *InitProducerIDResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/interceptors.go b/vendor/github.com/IBM/sarama/interceptors.go
new file mode 100644
index 0000000..d4dc23c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/interceptors.go
@@ -0,0 +1,43 @@
+package sarama
+
+// ProducerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the producer before they are published to the Kafka cluster.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ProducerInterceptor interface {
+
+	// OnSend is called when the producer message is intercepted. Please avoid
+	// modifying the message until it's safe to do so, as this is _not_ a copy
+	// of the message.
+	OnSend(*ProducerMessage)
+}
+
+// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the consumer before they are sent to the messages channel.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ConsumerInterceptor interface {
+
+	// OnConsume is called when the consumed message is intercepted. Please
+	// avoid modifying the message until it's safe to do so, as this is _not_ a
+	// copy of the message.
+	OnConsume(*ConsumerMessage)
+}
+
+func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
+	defer func() {
+		if r := recover(); r != nil {
+			Logger.Printf("Error when calling producer interceptor: %v, %v", interceptor, r)
+		}
+	}()
+
+	interceptor.OnSend(msg)
+}
+
+func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
+	defer func() {
+		if r := recover(); r != nil {
+			Logger.Printf("Error when calling consumer interceptor: %v, %v", interceptor, r)
+		}
+	}()
+
+	interceptor.OnConsume(msg)
+}
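+
+// Editor's note: the type below is an illustrative sketch added for this
+// review and is not part of the upstream sarama source. It shows one way a
+// ProducerInterceptor could be implemented; it is assumed to be registered
+// via Config.Producer.Interceptors so that OnSend runs before each publish.
+type exampleHeaderInterceptor struct {
+	key, value string
+}
+
+// OnSend stamps a static header onto every outgoing message. The message is
+// mutated in place because, as documented above, it is not a copy.
+func (h *exampleHeaderInterceptor) OnSend(msg *ProducerMessage) {
+	msg.Headers = append(msg.Headers, RecordHeader{
+		Key:   []byte(h.key),
+		Value: []byte(h.value),
+	})
+}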
diff --git a/vendor/github.com/IBM/sarama/join_group_request.go b/vendor/github.com/IBM/sarama/join_group_request.go
new file mode 100644
index 0000000..e8cbc46
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/join_group_request.go
@@ -0,0 +1,234 @@
+package sarama
+
+type GroupProtocol struct {
+	// Name contains the protocol name.
+	Name string
+	// Metadata contains the protocol metadata.
+	Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+	p.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+	p.Metadata, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+	if err := pe.putString(p.Name); err != nil {
+		return err
+	}
+	if err := pe.putBytes(p.Metadata); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+type JoinGroupRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// GroupId contains the group identifier.
+	GroupId string
+	// SessionTimeout specifies that the coordinator should consider the consumer
+	// dead if it receives no heartbeat after this timeout in milliseconds.
+	SessionTimeout int32
+	// RebalanceTimeout contains the maximum time in milliseconds that the
+	// coordinator will wait for each member to rejoin when rebalancing the
+	// group.
+	RebalanceTimeout int32
+	// MemberId contains the member id assigned by the group coordinator.
+	MemberId string
+	// GroupInstanceId contains the unique identifier of the consumer instance
+	// provided by end user.
+	GroupInstanceId *string
+	// ProtocolType contains the unique name for the class of protocols
+	// implemented by the group we want to join.
+	ProtocolType string
+	// GroupProtocols contains the list of protocols that the member supports.
+	// deprecated; use OrderedGroupProtocols
+	GroupProtocols map[string][]byte
+	// OrderedGroupProtocols contains an ordered list of protocols that the member
+	// supports.
+	OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	pe.putInt32(r.SessionTimeout)
+	if r.Version >= 1 {
+		pe.putInt32(r.RebalanceTimeout)
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+	if r.Version >= 5 {
+		if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+	if err := pe.putString(r.ProtocolType); err != nil {
+		return err
+	}
+
+	if len(r.GroupProtocols) > 0 {
+		if len(r.OrderedGroupProtocols) > 0 {
+			return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+		}
+
+		if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+			return err
+		}
+		for name, metadata := range r.GroupProtocols {
+			if err := pe.putString(name); err != nil {
+				return err
+			}
+			if err := pe.putBytes(metadata); err != nil {
+				return err
+			}
+			pe.putEmptyTaggedFieldArray()
+		}
+	} else {
+		if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+			return err
+		}
+		for _, protocol := range r.OrderedGroupProtocols {
+			if err := protocol.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.SessionTimeout, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if version >= 1 {
+		if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if version >= 5 {
+		if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return
+		}
+	}
+
+	if r.ProtocolType, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupProtocols = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		protocol := &GroupProtocol{}
+		if err := protocol.decode(pd); err != nil {
+			return err
+		}
+		r.GroupProtocols[protocol.Name] = protocol.Metadata
+		r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *JoinGroupRequest) key() int16 {
+	return apiKeyJoinGroup
+}
+
+func (r *JoinGroupRequest) version() int16 {
+	return r.Version
+}
+
+func (r *JoinGroupRequest) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 2
+	}
+	return 1
+}
+
+func (r *JoinGroupRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 6
+}
+
+func (r *JoinGroupRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *JoinGroupRequest) isFlexibleVersion(version int16) bool {
+	return version >= 6
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 6:
+		return V2_4_0_0
+	case 5:
+		return V2_3_0_0
+	case 4:
+		return V2_2_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+	r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+		Name:     name,
+		Metadata: metadata,
+	})
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+	bin, err := encode(metadata, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupProtocol(name, bin)
+	return nil
+}
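+
+// Editor's note: the function below is an illustrative sketch added for this
+// review and is not part of the upstream sarama source. It shows how a caller
+// could build a JoinGroupRequest with the ordered-protocol helpers above
+// rather than the deprecated GroupProtocols map; the group, topic and
+// protocol names are placeholders.
+func exampleJoinGroupRequest() (*JoinGroupRequest, error) {
+	req := &JoinGroupRequest{
+		GroupId:        "example-group",
+		MemberId:       "", // empty on first join; the coordinator assigns one
+		SessionTimeout: 30000,
+		ProtocolType:   "consumer",
+	}
+	// AddGroupProtocolMetadata encodes the metadata and appends it to
+	// OrderedGroupProtocols, preserving the preference order.
+	if err := req.AddGroupProtocolMetadata("range", &ConsumerGroupMemberMetadata{
+		Topics: []string{"example-topic"},
+	}); err != nil {
+		return nil, err
+	}
+	return req, nil
+}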
diff --git a/vendor/github.com/IBM/sarama/join_group_response.go b/vendor/github.com/IBM/sarama/join_group_response.go
new file mode 100644
index 0000000..bdabe0a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/join_group_response.go
@@ -0,0 +1,211 @@
+package sarama
+
+import "time"
+
+type JoinGroupResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTime contains the duration for which the request was throttled due
+	// to a quota violation, or zero if the request did not violate any quota.
+	ThrottleTime int32
+	// Err contains the error code, or 0 if there was no error.
+	Err KError
+	// GenerationId contains the generation ID of the group.
+	GenerationId int32
+	// GroupProtocol contains the group protocol selected by the coordinator.
+	GroupProtocol string
+	// LeaderId contains the leader of the group.
+	LeaderId string
+	// MemberId contains the member ID assigned by the group coordinator.
+	MemberId string
+	// Members contains the per-group-member information.
+	Members []GroupMember
+}
+
+func (r *JoinGroupResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type GroupMember struct {
+	// MemberId contains the group member ID.
+	MemberId string
+	// GroupInstanceId contains the unique identifier of the consumer instance
+	// provided by end user.
+	GroupInstanceId *string
+	// Metadata contains the group member metadata.
+	Metadata []byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+	members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+	for _, member := range r.Members {
+		meta := new(ConsumerGroupMemberMetadata)
+		if err := decode(member.Metadata, meta, nil); err != nil {
+			return nil, err
+		}
+		members[member.MemberId] = *meta
+	}
+	return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putKError(r.Err)
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.GroupProtocol); err != nil {
+		return err
+	}
+	if err := pe.putString(r.LeaderId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Members)); err != nil {
+		return err
+	}
+
+	for _, member := range r.Members {
+		if err := pe.putString(member.MemberId); err != nil {
+			return err
+		}
+		if r.Version >= 5 {
+			if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+				return err
+			}
+		}
+		if err := pe.putBytes(member.Metadata); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 2 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return
+		}
+	}
+
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if r.GroupProtocol, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.LeaderId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		_, err = pd.getEmptyTaggedFieldArray()
+		return err
+	}
+
+	r.Members = make([]GroupMember, n)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		var groupInstanceId *string = nil
+		if r.Version >= 5 {
+			groupInstanceId, err = pd.getNullableString()
+			if err != nil {
+				return err
+			}
+		}
+
+		memberMetadata, err := pd.getBytes()
+		if err != nil {
+			return err
+		}
+
+		r.Members[i] = GroupMember{MemberId: memberId, GroupInstanceId: groupInstanceId, Metadata: memberMetadata}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *JoinGroupResponse) key() int16 {
+	return apiKeyJoinGroup
+}
+
+func (r *JoinGroupResponse) version() int16 {
+	return r.Version
+}
+
+func (r *JoinGroupResponse) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 1
+	}
+	return 0
+}
+
+func (r *JoinGroupResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 6
+}
+
+func (r *JoinGroupResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *JoinGroupResponse) isFlexibleVersion(version int16) bool {
+	return version >= 6
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 6:
+		return V2_4_0_0
+	case 5:
+		return V2_3_0_0
+	case 4:
+		return V2_2_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *JoinGroupResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/kerberos_client.go b/vendor/github.com/IBM/sarama/kerberos_client.go
new file mode 100644
index 0000000..2891268
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/kerberos_client.go
@@ -0,0 +1,57 @@
+package sarama
+
+import (
+	krb5client "github.com/jcmturner/gokrb5/v8/client"
+	krb5config "github.com/jcmturner/gokrb5/v8/config"
+	"github.com/jcmturner/gokrb5/v8/credentials"
+	"github.com/jcmturner/gokrb5/v8/keytab"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+type KerberosGoKrb5Client struct {
+	krb5client.Client
+}
+
+func (c *KerberosGoKrb5Client) Domain() string {
+	return c.Credentials.Domain()
+}
+
+func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
+	return c.Credentials.CName()
+}
+
+// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
+// It uses a pure Go Kerberos 5 implementation (RFC 4121 and RFC 4120), built on
+// the gokrb5 library, a pure Go Kerberos client with some GSS-API capabilities.
+func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
+	cfg, err := krb5config.Load(config.KerberosConfigPath)
+	if err != nil {
+		return nil, err
+	}
+	return createClient(config, cfg)
+}
+
+func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
+	var client *krb5client.Client
+	switch config.AuthType {
+	case KRB5_KEYTAB_AUTH:
+		kt, err := keytab.Load(config.KeyTabPath)
+		if err != nil {
+			return nil, err
+		}
+		client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+	case KRB5_CCACHE_AUTH:
+		cc, err := credentials.LoadCCache(config.CCachePath)
+		if err != nil {
+			return nil, err
+		}
+		client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+		if err != nil {
+			return nil, err
+		}
+	default:
+		client = krb5client.NewWithPassword(config.Username,
+			config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+	}
+	return &KerberosGoKrb5Client{*client}, nil
+}
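+
+// Editor's note: the function below is an illustrative sketch added for this
+// review and is not part of the upstream sarama source. It shows roughly how
+// a client Config could be pointed at the keytab flow handled by createClient
+// above; the Config.Net.SASL fields and SASLTypeGSSAPI come from the wider
+// sarama API, and all paths, realm and principal values are placeholders.
+func exampleGSSAPIConfig() *Config {
+	conf := NewConfig()
+	conf.Net.SASL.Enable = true
+	conf.Net.SASL.Mechanism = SASLTypeGSSAPI
+	conf.Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH
+	conf.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf"
+	conf.Net.SASL.GSSAPI.KeyTabPath = "/etc/security/kafka.keytab"
+	conf.Net.SASL.GSSAPI.ServiceName = "kafka"
+	conf.Net.SASL.GSSAPI.Username = "kafka-client"
+	conf.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM"
+	return conf
+}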
diff --git a/vendor/github.com/IBM/sarama/leave_group_request.go b/vendor/github.com/IBM/sarama/leave_group_request.go
new file mode 100644
index 0000000..377bdc5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/leave_group_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type MemberIdentity struct {
+	MemberId        string
+	GroupInstanceId *string
+}
+
+type LeaveGroupRequest struct {
+	Version  int16
+	GroupId  string
+	MemberId string           // Removed in Version 3
+	Members  []MemberIdentity // Added in Version 3
+}
+
+func (r *LeaveGroupRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	if r.Version < 3 {
+		if err := pe.putString(r.MemberId); err != nil {
+			return err
+		}
+	}
+	if r.Version >= 3 {
+		if err := pe.putArrayLength(len(r.Members)); err != nil {
+			return err
+		}
+		for _, member := range r.Members {
+			if err := pe.putString(member.MemberId); err != nil {
+				return err
+			}
+			if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+				return err
+			}
+			pe.putEmptyTaggedFieldArray()
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.Version < 3 {
+		if r.MemberId, err = pd.getString(); err != nil {
+			return
+		}
+	}
+	if r.Version >= 3 {
+		memberCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Members = make([]MemberIdentity, memberCount)
+		for i := 0; i < memberCount; i++ {
+			memberIdentity := MemberIdentity{}
+			if memberIdentity.MemberId, err = pd.getString(); err != nil {
+				return err
+			}
+			if memberIdentity.GroupInstanceId, err = pd.getNullableString(); err != nil {
+				return err
+			}
+			r.Members[i] = memberIdentity
+			_, err = pd.getEmptyTaggedFieldArray()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+	return apiKeyLeaveGroup
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+	return r.Version
+}
+
+func (r *LeaveGroupRequest) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 2
+	}
+	return 1
+}
+
+func (r *LeaveGroupRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *LeaveGroupRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *LeaveGroupRequest) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_4_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/leave_group_response.go b/vendor/github.com/IBM/sarama/leave_group_response.go
new file mode 100644
index 0000000..5700992
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/leave_group_response.go
@@ -0,0 +1,129 @@
+package sarama
+
+import "time"
+
+type MemberResponse struct {
+	MemberId        string
+	GroupInstanceId *string
+	Err             KError
+}
+type LeaveGroupResponse struct {
+	Version      int16
+	ThrottleTime int32
+	Err          KError
+	Members      []MemberResponse
+}
+
+func (r *LeaveGroupResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putKError(r.Err)
+	if r.Version >= 3 {
+		if err := pe.putArrayLength(len(r.Members)); err != nil {
+			return err
+		}
+		for _, member := range r.Members {
+			if err := pe.putString(member.MemberId); err != nil {
+				return err
+			}
+			if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+				return err
+			}
+			pe.putKError(member.Err)
+			pe.putEmptyTaggedFieldArray()
+		}
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.Version >= 3 {
+		membersLen, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Members = make([]MemberResponse, membersLen)
+		for i := 0; i < len(r.Members); i++ {
+			if r.Members[i].MemberId, err = pd.getString(); err != nil {
+				return err
+			}
+			if r.Members[i].GroupInstanceId, err = pd.getNullableString(); err != nil {
+				return err
+			}
+			if r.Members[i].Err, err = pd.getKError(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+	return apiKeyLeaveGroup
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+	return r.Version
+}
+
+func (r *LeaveGroupResponse) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 1
+	}
+	return 0
+}
+
+func (r *LeaveGroupResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *LeaveGroupResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *LeaveGroupResponse) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *LeaveGroupResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/IBM/sarama/length_field.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/length_field.go
rename to vendor/github.com/IBM/sarama/length_field.go
diff --git a/vendor/github.com/IBM/sarama/list_groups_request.go b/vendor/github.com/IBM/sarama/list_groups_request.go
new file mode 100644
index 0000000..b02e331
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_groups_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type ListGroupsRequest struct {
+	Version      int16
+	StatesFilter []string // version 4 or later
+	TypesFilter  []string // version 5 or later
+}
+
+func (r *ListGroupsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+	if r.Version >= 4 {
+		if err := pe.putArrayLength(len(r.StatesFilter)); err != nil {
+			return err
+		}
+		for _, filter := range r.StatesFilter {
+			err := pe.putString(filter)
+			if err != nil {
+				return err
+			}
+		}
+		if r.Version >= 5 {
+			if err := pe.putArrayLength(len(r.TypesFilter)); err != nil {
+				return err
+			}
+			for _, filter := range r.TypesFilter {
+				err := pe.putString(filter)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 4 {
+		if r.StatesFilter, err = pd.getStringArray(); err != nil {
+			return err
+		}
+		if r.Version >= 5 {
+			if r.TypesFilter, err = pd.getStringArray(); err != nil {
+				return err
+			}
+		}
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ListGroupsRequest) key() int16 {
+	return apiKeyListGroups
+}
+
+func (r *ListGroupsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ListGroupsRequest) headerVersion() int16 {
+	if r.Version >= 3 {
+		return 2
+	}
+	return 1
+}
+
+func (r *ListGroupsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *ListGroupsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListGroupsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 3
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V3_8_0_0
+	case 4:
+		return V2_6_0_0
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_6_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/list_groups_response.go b/vendor/github.com/IBM/sarama/list_groups_response.go
new file mode 100644
index 0000000..7e3a55f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_groups_response.go
@@ -0,0 +1,165 @@
+package sarama
+
+type ListGroupsResponse struct {
+	Version      int16
+	ThrottleTime int32
+	Err          KError
+	Groups       map[string]string
+	GroupsData   map[string]GroupData // version 4 or later
+}
+
+func (r *ListGroupsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type GroupData struct {
+	GroupState string // version 4 or later
+	GroupType  string // version 5 or later
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTime)
+	}
+
+	pe.putKError(r.Err)
+
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+	for groupId, protocolType := range r.Groups {
+		if err := pe.putString(groupId); err != nil {
+			return err
+		}
+		if err := pe.putString(protocolType); err != nil {
+			return err
+		}
+		if r.Version >= 4 {
+			groupData := r.GroupsData[groupId]
+			if err := pe.putString(groupData.GroupState); err != nil {
+				return err
+			}
+		}
+
+		if r.Version >= 5 {
+			groupData := r.GroupsData[groupId]
+			if err := pe.putString(groupData.GroupType); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < n; i++ {
+		if i == 0 {
+			r.Groups = make(map[string]string)
+			if r.Version >= 4 {
+				r.GroupsData = make(map[string]GroupData)
+			}
+		}
+
+		var groupId, protocolType string
+		groupId, err = pd.getString()
+		if err != nil {
+			return err
+		}
+		protocolType, err = pd.getString()
+		if err != nil {
+			return err
+		}
+
+		r.Groups[groupId] = protocolType
+
+		if r.Version >= 4 {
+			var groupData GroupData
+			groupState, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			groupData.GroupState = groupState
+			if r.Version >= 5 {
+				groupType, err := pd.getString()
+				if err != nil {
+					return err
+				}
+				groupData.GroupType = groupType
+			}
+			r.GroupsData[groupId] = groupData
+		}
+
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ListGroupsResponse) key() int16 {
+	return apiKeyListGroups
+}
+
+func (r *ListGroupsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ListGroupsResponse) headerVersion() int16 {
+	if r.Version >= 3 {
+		return 1
+	}
+	return 0
+}
+
+func (r *ListGroupsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *ListGroupsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListGroupsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 3
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V3_8_0_0
+	case 4:
+		return V2_6_0_0
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_6_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go
new file mode 100644
index 0000000..25b1b29
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go
@@ -0,0 +1,113 @@
+package sarama
+
+type ListPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string][]int32
+	Version   int16
+}
+
+func (r *ListPartitionReassignmentsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string][]int32)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make([]int32, partitionCount)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				r.blocks[topic][j] = partition
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ListPartitionReassignmentsRequest) key() int16 {
+	return apiKeyListPartitionReassignments
+}
+
+func (r *ListPartitionReassignmentsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *ListPartitionReassignmentsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *ListPartitionReassignmentsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListPartitionReassignmentsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string][]int32)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = partitionIDs
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go
new file mode 100644
index 0000000..8ade1e4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go
@@ -0,0 +1,188 @@
+package sarama
+
+import "time"
+
+type PartitionReplicaReassignmentsStatus struct {
+	Replicas         []int32
+	AddingReplicas   []int32
+	RemovingReplicas []int32
+}
+
+func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
+	if err := pe.putInt32Array(b.Replicas); err != nil {
+		return err
+	}
+	if err := pe.putInt32Array(b.AddingReplicas); err != nil {
+		return err
+	}
+	if err := pe.putInt32Array(b.RemovingReplicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
+	if b.Replicas, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+
+	if b.AddingReplicas, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+
+	if b.RemovingReplicas, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type ListPartitionReassignmentsResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	ErrorCode      KError
+	ErrorMessage   *string
+	TopicStatus    map[string]map[int32]*PartitionReplicaReassignmentsStatus
+}
+
+func (r *ListPartitionReassignmentsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
+	if r.TopicStatus == nil {
+		r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
+	}
+	partitions := r.TopicStatus[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
+		r.TopicStatus[topic] = partitions
+	}
+
+	partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
+}
+
+func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+	pe.putKError(r.ErrorCode)
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.TopicStatus)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.TopicStatus {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
+	for i := 0; i < numTopics; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		ongoingPartitionReassignments, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
+
+		for j := 0; j < ongoingPartitionReassignments; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := &PartitionReplicaReassignmentsStatus{}
+			if err := block.decode(pd); err != nil {
+				return err
+			}
+			r.TopicStatus[topic][partition] = block
+		}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ListPartitionReassignmentsResponse) key() int16 {
+	return apiKeyListPartitionReassignments
+}
+
+func (r *ListPartitionReassignmentsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *ListPartitionReassignmentsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *ListPartitionReassignmentsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListPartitionReassignmentsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *ListPartitionReassignmentsResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/message.go b/vendor/github.com/IBM/sarama/message.go
new file mode 100644
index 0000000..c6f35a3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/message.go
@@ -0,0 +1,190 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	// CompressionNone no compression
+	CompressionNone CompressionCodec = iota
+	// CompressionGZIP compression using GZIP
+	CompressionGZIP
+	// CompressionSnappy compression using snappy
+	CompressionSnappy
+	// CompressionLZ4 compression using LZ4
+	CompressionLZ4
+	// CompressionZSTD compression using ZSTD
+	CompressionZSTD
+
+	// The lowest 3 bits contain the compression codec used for the message
+	compressionCodecMask int8 = 0x07
+
+	// Bit 3 set for "LogAppend" timestamps
+	timestampTypeMask = 0x08
+
+	// CompressionLevelDefault is the constant to use in CompressionLevel
+	// to select the default compression level for any codec. The value is
+	// chosen so that it does not collide with any real compression level.
+	CompressionLevelDefault = -1000
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+func (cc CompressionCodec) String() string {
+	return []string{
+		"none",
+		"gzip",
+		"snappy",
+		"lz4",
+		"zstd",
+	}[int(cc)]
+}
+
+// UnmarshalText returns a CompressionCodec from its string representation.
+func (cc *CompressionCodec) UnmarshalText(text []byte) error {
+	codecs := map[string]CompressionCodec{
+		"none":   CompressionNone,
+		"gzip":   CompressionGZIP,
+		"snappy": CompressionSnappy,
+		"lz4":    CompressionLZ4,
+		"zstd":   CompressionZSTD,
+	}
+	codec, ok := codecs[string(text)]
+	if !ok {
+		return fmt.Errorf("cannot parse %q as a compression codec", string(text))
+	}
+	*cc = codec
+	return nil
+}
+
+// MarshalText transforms a CompressionCodec into its string representation.
+func (cc CompressionCodec) MarshalText() ([]byte, error) {
+	return []byte(cc.String()), nil
+}
+
+// Message is a kafka message type
+type Message struct {
+	Codec            CompressionCodec // codec used to compress the message contents
+	CompressionLevel int              // compression level
+	LogAppendTime    bool             // the used timestamp is LogAppendTime
+	Key              []byte           // the message key, may be nil
+	Value            []byte           // the message contents
+	Set              *MessageSet      // the message set a message might wrap
+	Version          int8             // v1 requires Kafka 0.10
+	Timestamp        time.Time        // the timestamp of the message (version 1+ only)
+
+	compressedCache []byte
+	compressedSize  int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+	pe.push(newCRC32Field(crcIEEE))
+
+	pe.putInt8(m.Version)
+
+	attributes := int8(m.Codec) & compressionCodecMask
+	if m.LogAppendTime {
+		attributes |= timestampTypeMask
+	}
+	pe.putInt8(attributes)
+
+	if m.Version >= 1 {
+		if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
+			return err
+		}
+	}
+
+	err := pe.putBytes(m.Key)
+	if err != nil {
+		return err
+	}
+
+	var payload []byte
+
+	if m.compressedCache != nil {
+		payload = m.compressedCache
+		m.compressedCache = nil
+	} else if m.Value != nil {
+		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
+		if err != nil {
+			return err
+		}
+		m.compressedCache = payload
+		// Record the compressed payload size for metric gathering
+		m.compressedSize = len(payload)
+	}
+
+	if err = pe.putBytes(payload); err != nil {
+		return err
+	}
+
+	return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+	crc32Decoder := acquireCrc32Field(crcIEEE)
+	defer releaseCrc32Field(crc32Decoder)
+
+	err = pd.push(crc32Decoder)
+	if err != nil {
+		return err
+	}
+
+	m.Version, err = pd.getInt8()
+	if err != nil {
+		return err
+	}
+
+	if m.Version > 1 {
+		return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+	}
+
+	attribute, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	m.Codec = CompressionCodec(attribute & compressionCodecMask)
+	m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
+
+	if m.Version == 1 {
+		if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
+			return err
+		}
+	}
+
+	m.Key, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	m.Value, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	// Required for deep equal assertion during tests but might be useful
+	// for future metrics about the compression ratio in fetch requests
+	m.compressedSize = len(m.Value)
+
+	if m.Value != nil && m.Codec != CompressionNone {
+		m.Value, err = decompress(m.Codec, m.Value)
+		if err != nil {
+			return err
+		}
+
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	}
+
+	return pd.pop()
+}
+
+// decodes a message set from a previously encoded bulk-message
+func (m *Message) decodeSet() (err error) {
+	pd := realDecoder{raw: m.Value}
+	m.Set = &MessageSet{}
+	return m.Set.decode(&pd)
+}
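+
+// Editor's note: the function below is an illustrative sketch added for this
+// review and is not part of the upstream sarama source. It shows how a codec
+// name from a configuration file (for example "gzip") can be turned into a
+// CompressionCodec via the UnmarshalText method defined above.
+func exampleParseCodec(name string) (CompressionCodec, error) {
+	var codec CompressionCodec
+	if err := codec.UnmarshalText([]byte(name)); err != nil {
+		return CompressionNone, err
+	}
+	return codec, nil
+}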
diff --git a/vendor/github.com/IBM/sarama/message_set.go b/vendor/github.com/IBM/sarama/message_set.go
new file mode 100644
index 0000000..5bc2112
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/message_set.go
@@ -0,0 +1,112 @@
+package sarama
+
+import "errors"
+
+type MessageBlock struct {
+	Offset int64
+	Msg    *Message
+}
+
+// Messages is a convenience helper which returns either all the messages
+// wrapped in this block's nested set, or the block itself as a single-element slice.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+	if msb.Msg.Set != nil {
+		return msb.Msg.Set.Messages
+	}
+	return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+	pe.putInt64(msb.Offset)
+	pe.push(&lengthField{})
+	err := msb.Msg.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+	if msb.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	lengthDecoder := acquireLengthField()
+	defer releaseLengthField(lengthDecoder)
+
+	if err = pd.push(lengthDecoder); err != nil {
+		return err
+	}
+
+	msb.Msg = new(Message)
+	if err = msb.Msg.decode(pd); err != nil {
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type MessageSet struct {
+	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+	OverflowMessage        bool // whether the set on the wire contained an overflow message
+	Messages               []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+	for i := range ms.Messages {
+		err := ms.Messages[i].encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+	ms.Messages = nil
+
+	for pd.remaining() > 0 {
+		magic, err := magicValue(pd)
+		if err != nil {
+			if errors.Is(err, ErrInsufficientData) {
+				ms.PartialTrailingMessage = true
+				return nil
+			}
+			return err
+		}
+
+		if magic > 1 {
+			return nil
+		}
+
+		msb := new(MessageBlock)
+		err = msb.decode(pd)
+		if err == nil {
+			ms.Messages = append(ms.Messages, msb)
+		} else if errors.Is(err, ErrInsufficientData) {
+			// As an optimization the server is allowed to return a partial message at the
+			// end of the message set; clients are expected to handle this case, so we
+			// simply ignore the partial trailing message.
+			if msb.Offset == -1 {
+				// This is an overflow message caused by chunked down conversion
+				ms.OverflowMessage = true
+			} else {
+				ms.PartialTrailingMessage = true
+			}
+			return nil
+		} else {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+	block := new(MessageBlock)
+	block.Msg = msg
+	ms.Messages = append(ms.Messages, block)
+}
diff --git a/vendor/github.com/IBM/sarama/metadata.go b/vendor/github.com/IBM/sarama/metadata.go
new file mode 100644
index 0000000..20620b3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata.go
@@ -0,0 +1,239 @@
+package sarama
+
+import (
+	"sync"
+)
+
+type metadataRefresh func(topics []string) error
+
+// currentRefresh makes sure sarama does not issue metadata requests
+// in parallel. If we need to refresh the metadata for a list of topics,
+// this struct will check if a refresh is already ongoing, and if so, it will
+// accumulate the list of topics to refresh in the next refresh.
+// When the current refresh is over, it will queue a new metadata refresh call
+// with the accumulated list of topics.
+type currentRefresh struct {
+	// This is the function that gets called to refresh the metadata.
+	// It is called with the list of all topics that need to be refreshed
+	// or with nil if all topics need to be refreshed.
+	refresh func(topics []string) error
+
+	mu        sync.Mutex
+	ongoing   bool
+	topicsMap map[string]struct{}
+	topics    []string
+	allTopics bool
+	chans     []chan error
+}
+
+// addTopicsFrom adds topics from the next refresh to the current refresh.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) addTopicsFrom(next *nextRefresh) {
+	if next.allTopics {
+		r.allTopics = true
+		return
+	}
+	if len(next.topics) > 0 {
+		r.addTopics(next.topics)
+	}
+}
+
+// nextRefresh holds the list of topics we will need
+// to refresh in the next refresh.
+// When a refresh is ongoing, calls to RefreshMetadata() are
+// accumulated in this struct, so that we can immediately issue another
+// refresh when the current refresh is over.
+type nextRefresh struct {
+	mu        sync.Mutex
+	topics    []string
+	allTopics bool
+}
+
+// addTopics adds topics to the refresh.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) addTopics(topics []string) {
+	if len(topics) == 0 {
+		r.allTopics = true
+		return
+	}
+	for _, topic := range topics {
+		if _, ok := r.topicsMap[topic]; ok {
+			continue
+		}
+		r.topicsMap[topic] = struct{}{}
+		r.topics = append(r.topics, topic)
+	}
+}
+
+func (r *nextRefresh) addTopics(topics []string) {
+	if len(topics) == 0 {
+		r.allTopics = true
+		// All topics are requested, so we can clear the topics
+		// that were previously accumulated.
+		r.topics = r.topics[:0]
+		return
+	}
+	r.topics = append(r.topics, topics...)
+}
+
+func (r *nextRefresh) clear() {
+	r.topics = r.topics[:0]
+	r.allTopics = false
+}
+
+func (r *currentRefresh) hasTopics(topics []string) bool {
+	if len(topics) == 0 {
+		// This means that the caller wants to know if the refresh is for all topics.
+		// In this case, we return true if the refresh is for all topics, or false if it is not.
+		return r.allTopics
+	}
+	if r.allTopics {
+		return true
+	}
+	for _, topic := range topics {
+		if _, ok := r.topicsMap[topic]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// start starts a new refresh.
+// The refresh is started in a new goroutine, and this function
+// returns a channel on which the caller can wait for the refresh
+// to complete.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) start() chan error {
+	r.ongoing = true
+	ch := r.wait()
+	topics := r.topics
+	if r.allTopics {
+		topics = nil
+	}
+	go func() {
+		err := r.refresh(topics)
+		r.mu.Lock()
+		defer r.mu.Unlock()
+
+		r.ongoing = false
+		for _, ch := range r.chans {
+			ch <- err
+			close(ch)
+		}
+		r.clear()
+	}()
+	return ch
+}
+
+// clear clears the refresh state.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) clear() {
+	r.topics = r.topics[:0]
+	for key := range r.topicsMap {
+		delete(r.topicsMap, key)
+	}
+	r.allTopics = false
+	r.chans = r.chans[:0]
+}
+
+// wait returns the channel on which you can wait for the refresh
+// to complete.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) wait() chan error {
+	if !r.ongoing {
+		panic("waiting for a refresh that is not ongoing")
+	}
+	ch := make(chan error, 1)
+	r.chans = append(r.chans, ch)
+	return ch
+}
+
+// singleFlightMetadataRefresher helps manage metadata refreshes.
+// It makes sure a sarama client never issues more than one metadata refresh
+// in parallel.
+type singleFlightMetadataRefresher struct {
+	current *currentRefresh
+	next    *nextRefresh
+}
+
+func newSingleFlightRefresher(f func(topics []string) error) metadataRefresh {
+	return newMetadataRefresh(f).Refresh
+}
+
+func newMetadataRefresh(f func(topics []string) error) *singleFlightMetadataRefresher {
+	return &singleFlightMetadataRefresher{
+		current: &currentRefresh{
+			topicsMap: make(map[string]struct{}),
+			refresh:   f,
+		},
+		next: &nextRefresh{},
+	}
+}
+
+// Refresh is the function that clients call when they want to refresh
+// the metadata. This function blocks until a refresh is issued, and its
+// result is received, for the list of topics the caller provided.
+// If a refresh was already ongoing for this list of topics, the function
+// waits on that refresh to complete, and returns its result.
+// If a refresh was already ongoing for a different list of topics, the function
+// accumulates the list of topics to refresh in the next refresh, and queues that refresh.
+// If no refresh is ongoing, it will start a new refresh, and return its result.
+func (m *singleFlightMetadataRefresher) Refresh(topics []string) error {
+	for {
+		ch, queued := m.refreshOrQueue(topics)
+		if !queued {
+			return <-ch
+		}
+		<-ch
+	}
+}
+
+// refreshOrQueue returns a channel the refresh needs to wait on, and a boolean
+// that indicates whether waiting on the channel will return the result of that refresh
+// or whether the refresh was "queued" and the caller needs to wait for the channel to
+// return, and then call refreshOrQueue again.
+// When calling refreshOrQueue, three things can happen:
+//  1. either no refresh is ongoing.
+//     In this case, a new refresh is started, and the channel that's returned will
+//     contain the result of that refresh, so it returns "false" as the second return value.
+//  2. a refresh is ongoing, and it contains the topics we need.
+//     In this case, the channel that's returned will contain the result of that refresh,
+//     so it returns "false" as the second return value.
+//  3. a refresh is already ongoing, but doesn't contain the topics we need. In this case,
+//     the caller needs to wait for the refresh to finish, and then call refreshOrQueue again.
+//     The channel that's returned is for the current refresh (not the one the caller is
+//     interested in), so it returns "true" as the second return value. The caller needs to
+//     wait on the channel, disregard the value, and call refreshOrQueue again.
+func (m *singleFlightMetadataRefresher) refreshOrQueue(topics []string) (chan error, bool) {
+	m.current.mu.Lock()
+	defer m.current.mu.Unlock()
+	if !m.current.ongoing {
+		// If no refresh is ongoing, we can start a new one, in which
+		// we add the topics that have been accumulated in the next refresh
+		// and the topics that have been provided by the caller.
+		m.next.mu.Lock()
+		m.current.addTopicsFrom(m.next)
+		m.next.clear()
+		m.next.mu.Unlock()
+		m.current.addTopics(topics)
+		ch := m.current.start()
+		return ch, false
+	}
+	if m.current.hasTopics(topics) {
+		// A refresh is ongoing, and we were lucky: it is refreshing the topics we need already:
+		// we just have to wait for it to finish and return its results.
+		ch := m.current.wait()
+		return ch, false
+	}
+	// There is a refresh ongoing, but it is not refreshing the topics we need.
+	// We need to wait for it to finish, and then start a new refresh.
+	ch := m.current.wait()
+	m.next.mu.Lock()
+	m.next.addTopics(topics)
+	m.next.mu.Unlock()
+	// This is where we wait for that refresh to finish, and the loop will take care
+	// of starting the new one.
+	return ch, true
+}
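The single-flight refresher above coalesces concurrent RefreshMetadata calls and queues a follow-up refresh for topics the in-flight one does not cover. The following is a minimal, standalone sketch of the coalescing part only (hypothetical names, no per-topic tracking; it is not the sarama code itself): concurrent callers share one in-flight refresh and all receive its result.

package main

import (
	"fmt"
	"sync"
	"time"
)

// refresher coalesces concurrent Refresh calls onto a single in-flight refresh.
type refresher struct {
	mu      sync.Mutex
	ongoing bool
	chans   []chan error
	do      func() error // the actual (slow) refresh work
}

func (r *refresher) Refresh() error {
	r.mu.Lock()
	ch := make(chan error, 1)
	r.chans = append(r.chans, ch)
	if r.ongoing {
		// Someone else is already refreshing: just wait for their result.
		r.mu.Unlock()
		return <-ch
	}
	r.ongoing = true
	r.mu.Unlock()

	go func() {
		err := r.do()
		r.mu.Lock()
		r.ongoing = false
		for _, c := range r.chans {
			c <- err
			close(c)
		}
		r.chans = nil
		r.mu.Unlock()
	}()
	return <-ch
}

func main() {
	var calls int
	r := &refresher{do: func() error {
		calls++
		time.Sleep(50 * time.Millisecond) // simulate a slow metadata request
		return nil
	}}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = r.Refresh()
		}()
	}
	wg.Wait()
	fmt.Println("refresh calls issued:", calls) // typically far fewer than 10
}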
diff --git a/vendor/github.com/IBM/sarama/metadata_request.go b/vendor/github.com/IBM/sarama/metadata_request.go
new file mode 100644
index 0000000..f00e5ab
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata_request.go
@@ -0,0 +1,211 @@
+package sarama
+
+import "encoding/base64"
+
+type Uuid [16]byte
+
+func (u Uuid) String() string {
+	return base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(u[:])
+}
+
+var NullUUID = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+type MetadataRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// Topics contains the topics to fetch metadata for.
+	Topics []string
+	// AllowAutoTopicCreation indicates whether the broker may auto-create topics that we requested which do not already exist, if it is configured to do so.
+	AllowAutoTopicCreation             bool
+	IncludeClusterAuthorizedOperations bool // version 8 and up
+	IncludeTopicAuthorizedOperations   bool // version 8 and up
+}
+
+func (r *MetadataRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func NewMetadataRequest(version KafkaVersion, topics []string) *MetadataRequest {
+	m := &MetadataRequest{Topics: topics}
+	if version.IsAtLeast(V2_8_0_0) {
+		m.Version = 10
+	} else if version.IsAtLeast(V2_4_0_0) {
+		m.Version = 9
+	} else if version.IsAtLeast(V2_3_0_0) {
+		m.Version = 8
+	} else if version.IsAtLeast(V2_1_0_0) {
+		m.Version = 7
+	} else if version.IsAtLeast(V2_0_0_0) {
+		m.Version = 6
+	} else if version.IsAtLeast(V1_0_0_0) {
+		m.Version = 5
+	} else if version.IsAtLeast(V0_11_0_0) {
+		m.Version = 4
+	} else if version.IsAtLeast(V0_10_1_0) {
+		m.Version = 2
+	} else if version.IsAtLeast(V0_10_0_0) {
+		m.Version = 1
+	}
+	return m
+}
+
+func (r *MetadataRequest) encode(pe packetEncoder) (err error) {
+	if r.Version < 0 || r.Version > 10 {
+		return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
+	}
+	if r.Version == 0 || len(r.Topics) > 0 {
+		if err := pe.putArrayLength(len(r.Topics)); err != nil {
+			return err
+		}
+		if r.Version <= 9 {
+			for _, topicName := range r.Topics {
+				if err := pe.putString(topicName); err != nil {
+					return err
+				}
+				pe.putEmptyTaggedFieldArray()
+			}
+		} else { // r.Version = 10
+			for _, topicName := range r.Topics {
+				if err := pe.putRawBytes(NullUUID); err != nil {
+					return err
+				}
+				// Avoid implicit memory aliasing in for loop
+				tn := topicName
+				if err := pe.putNullableString(&tn); err != nil {
+					return err
+				}
+				pe.putEmptyTaggedFieldArray()
+			}
+		}
+	} else {
+		if err := pe.putArrayLength(-1); err != nil {
+			return err
+		}
+	}
+
+	if r.Version > 3 {
+		pe.putBool(r.AllowAutoTopicCreation)
+	}
+	if r.Version > 7 {
+		pe.putBool(r.IncludeClusterAuthorizedOperations)
+		pe.putBool(r.IncludeTopicAuthorizedOperations)
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	size, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if size > 0 {
+		r.Topics = make([]string, size)
+	}
+	if version <= 9 {
+		for i := range r.Topics {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			r.Topics[i] = topic
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	} else {
+		for i := range r.Topics {
+			if _, err = pd.getRawBytes(16); err != nil { // skip UUID
+				return err
+			}
+			topic, err := pd.getNullableString()
+			if err != nil {
+				return err
+			}
+			if topic != nil {
+				r.Topics[i] = *topic
+			}
+
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if r.Version >= 4 {
+		if r.AllowAutoTopicCreation, err = pd.getBool(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version > 7 {
+		includeClusterAuthz, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.IncludeClusterAuthorizedOperations = includeClusterAuthz
+		includeTopicAuthz, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.IncludeTopicAuthorizedOperations = includeTopicAuthz
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *MetadataRequest) key() int16 {
+	return apiKeyMetadata
+}
+
+func (r *MetadataRequest) version() int16 {
+	return r.Version
+}
+
+func (r *MetadataRequest) headerVersion() int16 {
+	if r.Version >= 9 {
+		return 2
+	}
+	return 1
+}
+
+func (r *MetadataRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 10
+}
+
+func (r *MetadataRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *MetadataRequest) isFlexibleVersion(version int16) bool {
+	return version >= 9
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 10:
+		return V2_8_0_0
+	case 9:
+		return V2_4_0_0
+	case 8:
+		return V2_3_0_0
+	case 7:
+		return V2_1_0_0
+	case 6:
+		return V2_0_0_0
+	case 5:
+		return V1_0_0_0
+	case 3, 4:
+		return V0_11_0_0
+	case 2:
+		return V0_10_1_0
+	case 1:
+		return V0_10_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_8_0_0
+	}
+}
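As a quick illustration of the version selection in NewMetadataRequest above (the topic name is made up), a caller targeting a given Kafka version gets the matching protocol version:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// A cluster at 2.8.0 or newer is spoken to with MetadataRequest v10.
	req := sarama.NewMetadataRequest(sarama.V2_8_0_0, []string{"my_topic"})
	fmt.Println(req.Version) // 10

	// A nil or empty topic list means "fetch metadata for all topics".
	req = sarama.NewMetadataRequest(sarama.V0_10_0_0, nil)
	fmt.Println(req.Version) // 1
}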
diff --git a/vendor/github.com/IBM/sarama/metadata_response.go b/vendor/github.com/IBM/sarama/metadata_response.go
new file mode 100644
index 0000000..196c075
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata_response.go
@@ -0,0 +1,449 @@
+package sarama
+
+import "time"
+
+// PartitionMetadata contains each partition in the topic.
+type PartitionMetadata struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// Err contains the partition error, or 0 if there was no error.
+	Err KError
+	// ID contains the partition index.
+	ID int32
+	// Leader contains the ID of the leader broker.
+	Leader int32
+	// LeaderEpoch contains the leader epoch of this partition.
+	LeaderEpoch int32
+	// Replicas contains the set of all nodes that host this partition.
+	Replicas []int32
+	// Isr contains the set of nodes that are in sync with the leader for this partition.
+	Isr []int32
+	// OfflineReplicas contains the set of offline replicas of this partition.
+	OfflineReplicas []int32
+}
+
+func (p *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
+	p.Version = version
+	p.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if p.ID, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if p.Leader, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if p.Version >= 7 {
+		if p.LeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	p.Replicas, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	p.Isr, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	if p.Version >= 5 {
+		p.OfflineReplicas, err = pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (p *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
+	p.Version = version
+	pe.putKError(p.Err)
+
+	pe.putInt32(p.ID)
+
+	pe.putInt32(p.Leader)
+
+	if p.Version >= 7 {
+		pe.putInt32(p.LeaderEpoch)
+	}
+
+	err = pe.putInt32Array(p.Replicas)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putInt32Array(p.Isr)
+	if err != nil {
+		return err
+	}
+
+	if p.Version >= 5 {
+		err = pe.putInt32Array(p.OfflineReplicas)
+		if err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+// TopicMetadata contains each topic in the response.
+type TopicMetadata struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// Err contains the topic error, or 0 if there was no error.
+	Err KError
+	// Name contains the topic name.
+	Name string
+	Uuid Uuid
+	// IsInternal contains true if the topic is internal.
+	IsInternal bool
+	// Partitions contains each partition in the topic.
+	Partitions                []*PartitionMetadata
+	TopicAuthorizedOperations int32 // Only valid for Version >= 8
+}
+
+func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
+	t.Version = version
+	t.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	t.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	if t.Version >= 10 {
+		uuid, err := pd.getRawBytes(16)
+		if err != nil {
+			return err
+		}
+		t.Uuid = [16]byte{}
+		for i := 0; i < 16; i++ {
+			t.Uuid[i] = uuid[i]
+		}
+	}
+
+	if t.Version >= 1 {
+		t.IsInternal, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	t.Partitions = make([]*PartitionMetadata, n)
+	for i := 0; i < n; i++ {
+		block := &PartitionMetadata{}
+		if err := block.decode(pd, t.Version); err != nil {
+			return err
+		}
+		t.Partitions[i] = block
+	}
+
+	if t.Version >= 8 {
+		t.TopicAuthorizedOperations, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (t *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
+	t.Version = version
+	pe.putKError(t.Err)
+
+	err = pe.putString(t.Name)
+	if err != nil {
+		return err
+	}
+
+	if t.Version >= 10 {
+		err = pe.putRawBytes(t.Uuid[:])
+		if err != nil {
+			return err
+		}
+	}
+
+	if t.Version >= 1 {
+		pe.putBool(t.IsInternal)
+	}
+
+	err = pe.putArrayLength(len(t.Partitions))
+	if err != nil {
+		return err
+	}
+	for _, block := range t.Partitions {
+		if err := block.encode(pe, t.Version); err != nil {
+			return err
+		}
+	}
+
+	if t.Version >= 8 {
+		pe.putInt32(t.TopicAuthorizedOperations)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+type MetadataResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+	ThrottleTimeMs int32
+	// Brokers contains each broker in the response.
+	Brokers []*Broker
+	// ClusterID contains the cluster ID that the responding broker belongs to.
+	ClusterID *string
+	// ControllerID contains the ID of the controller broker.
+	ControllerID int32
+	// Topics contains each topic in the response.
+	Topics                      []*TopicMetadata
+	ClusterAuthorizedOperations int32 // Only valid for Version >= 8
+}
+
+func (r *MetadataResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 3 {
+		if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	brokerArrayLen, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Brokers = make([]*Broker, brokerArrayLen)
+	for i := 0; i < brokerArrayLen; i++ {
+		r.Brokers[i] = new(Broker)
+		err = r.Brokers[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		r.ClusterID, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		if r.ControllerID, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	topicArrayLen, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]*TopicMetadata, topicArrayLen)
+	for i := 0; i < topicArrayLen; i++ {
+		r.Topics[i] = new(TopicMetadata)
+		err = r.Topics[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 8 {
+		r.ClusterAuthorizedOperations, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	err = pe.putArrayLength(len(r.Brokers))
+	if err != nil {
+		return err
+	}
+
+	for _, broker := range r.Brokers {
+		err = broker.encode(pe, r.Version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		err = pe.putNullableString(r.ClusterID)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ControllerID)
+	}
+
+	err = pe.putArrayLength(len(r.Topics))
+	if err != nil {
+		return err
+	}
+	for _, block := range r.Topics {
+		if err := block.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 8 {
+		pe.putInt32(r.ClusterAuthorizedOperations)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+	return apiKeyMetadata
+}
+
+func (r *MetadataResponse) version() int16 {
+	return r.Version
+}
+
+func (r *MetadataResponse) headerVersion() int16 {
+	if r.Version < 9 {
+		return 0
+	} else {
+		return 1
+	}
+}
+
+func (r *MetadataResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 10
+}
+
+func (r *MetadataResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *MetadataResponse) isFlexibleVersion(version int16) bool {
+	return version >= 9
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 10:
+		return V2_8_0_0
+	case 9:
+		return V2_4_0_0
+	case 8:
+		return V2_3_0_0
+	case 7:
+		return V2_1_0_0
+	case 6:
+		return V2_0_0_0
+	case 5:
+		return V1_0_0_0
+	case 3, 4:
+		return V0_11_0_0
+	case 2:
+		return V0_10_1_0
+	case 1:
+		return V0_10_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_8_0_0
+	}
+}
+
+func (r *MetadataResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+	r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+	var tmatch *TopicMetadata
+
+	for _, tm := range r.Topics {
+		if tm.Name == topic {
+			tmatch = tm
+			goto foundTopic
+		}
+	}
+
+	tmatch = new(TopicMetadata)
+	tmatch.Name = topic
+	r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+	tmatch.Err = err
+	return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
+	tmatch := r.AddTopic(topic, ErrNoError)
+	var pmatch *PartitionMetadata
+
+	for _, pm := range tmatch.Partitions {
+		if pm.ID == partition {
+			pmatch = pm
+			goto foundPartition
+		}
+	}
+
+	pmatch = new(PartitionMetadata)
+	pmatch.ID = partition
+	tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+	pmatch.Leader = brokerID
+	pmatch.Replicas = replicas
+	if pmatch.Replicas == nil {
+		pmatch.Replicas = []int32{}
+	}
+	pmatch.Isr = isr
+	if pmatch.Isr == nil {
+		pmatch.Isr = []int32{}
+	}
+	pmatch.OfflineReplicas = offline
+	if pmatch.OfflineReplicas == nil {
+		pmatch.OfflineReplicas = []int32{}
+	}
+	pmatch.Err = err
+}
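The AddBroker/AddTopic/AddTopicPartition helpers above make it easy to assemble a MetadataResponse in tests; a small sketch with hypothetical broker and topic values:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	res := &sarama.MetadataResponse{Version: 1}
	res.AddBroker("localhost:9092", 1)
	// topic "my_topic", partition 0 led by broker 1, replicas/ISR = {1}
	res.AddTopicPartition("my_topic", 0, 1, []int32{1}, []int32{1}, nil, sarama.ErrNoError)

	for _, topic := range res.Topics {
		fmt.Println(topic.Name, "partitions:", len(topic.Partitions))
	}
}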
diff --git a/vendor/github.com/IBM/sarama/metrics.go b/vendor/github.com/IBM/sarama/metrics.go
new file mode 100644
index 0000000..4f512f2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metrics.go
@@ -0,0 +1,120 @@
+package sarama
+
+import (
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+	metricsReservoirSize = 1028
+	metricsAlphaFactor   = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+	return r.GetOrRegister(name, func() metrics.Histogram {
+		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+	}).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+	// Use broker id like the Java client as it does not contain '.' or ':' characters that
+	// can be interpreted as special characters by monitoring tools (e.g. Graphite)
+	return name + "-for-broker-" + strconv.FormatInt(int64(broker.ID()), 10)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+	// cf. KAFKA-1902 and KAFKA-2337
+	return name + "-for-topic-" + strings.ReplaceAll(topic, ".", "_")
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
+
+// cleanupRegistry is an implementation of metrics.Registry that makes it
+// possible to unregister from the parent registry only those metrics
+// that were registered through this cleanupRegistry
+type cleanupRegistry struct {
+	parent  metrics.Registry
+	metrics map[string]struct{}
+	mutex   sync.RWMutex
+}
+
+func newCleanupRegistry(parent metrics.Registry) metrics.Registry {
+	return &cleanupRegistry{
+		parent:  parent,
+		metrics: map[string]struct{}{},
+	}
+}
+
+func (r *cleanupRegistry) Each(fn func(string, interface{})) {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	wrappedFn := func(name string, iface interface{}) {
+		if _, ok := r.metrics[name]; ok {
+			fn(name, iface)
+		}
+	}
+	r.parent.Each(wrappedFn)
+}
+
+func (r *cleanupRegistry) Get(name string) interface{} {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	if _, ok := r.metrics[name]; ok {
+		return r.parent.Get(name)
+	}
+	return nil
+}
+
+func (r *cleanupRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.metrics[name] = struct{}{}
+	return r.parent.GetOrRegister(name, metric)
+}
+
+func (r *cleanupRegistry) Register(name string, metric interface{}) error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.metrics[name] = struct{}{}
+	return r.parent.Register(name, metric)
+}
+
+func (r *cleanupRegistry) RunHealthchecks() {
+	r.parent.RunHealthchecks()
+}
+
+func (r *cleanupRegistry) GetAll() map[string]map[string]interface{} {
+	return r.parent.GetAll()
+}
+
+func (r *cleanupRegistry) Unregister(name string) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if _, ok := r.metrics[name]; ok {
+		delete(r.metrics, name)
+		r.parent.Unregister(name)
+	}
+}
+
+func (r *cleanupRegistry) UnregisterAll() {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for name := range r.metrics {
+		delete(r.metrics, name)
+		r.parent.Unregister(name)
+	}
+}
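For context, a sketch of the go-metrics calls these helpers wrap (the metric name and topic are hypothetical); the cleanupRegistry above automates the final Unregister for everything a client registered:

package main

import (
	"fmt"
	"strings"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	parent := metrics.NewRegistry()

	// Per-topic metric names replace '.' with '_' so tools such as Graphite
	// do not treat the dots as hierarchy separators (cf. KAFKA-1902).
	topic := "orders.v1"
	name := "record-send-rate-for-topic-" + strings.ReplaceAll(topic, ".", "_")
	meter := metrics.GetOrRegisterMeter(name, parent)
	meter.Mark(1)

	parent.Each(func(n string, _ interface{}) { fmt.Println("registered:", n) })

	// Unregistering removes the metric again; the cleanupRegistry wrapper
	// above automates this per client for everything it registered.
	parent.Unregister(name)
}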
diff --git a/vendor/github.com/IBM/sarama/mockbroker.go b/vendor/github.com/IBM/sarama/mockbroker.go
new file mode 100644
index 0000000..d913d44
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/mockbroker.go
@@ -0,0 +1,467 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"maps"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type GSSApiHandlerFunc func([]byte) []byte
+
+type requestHandlerFunc func(req *request) (res encoderWithHeader)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshalling, response marshaling, and makes it the test
+// writer responsibility to program correct according to the Kafka API protocol
+// MockBroker behavior.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from that connection and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes; the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID      int32
+	port          int32
+	closing       chan none
+	stopper       chan none
+	expectations  chan encoderWithHeader
+	listener      net.Listener
+	t             TestReporter
+	latency       time.Duration
+	handler       requestHandlerFunc
+	notifier      RequestNotifierFunc
+	history       []RequestResponse
+	lock          sync.Mutex
+	gssApiHandler GSSApiHandlerFunc
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request  protocolBody
+	Response encoder
+}
+
+// SetLatency makes broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines a mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	fnMap := maps.Clone(handlerMap)
+	b.setHandler(func(req *request) (res encoderWithHeader) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := fnMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
+
+// SetHandlerFuncByMap defines a mapping of Request types to requestHandlerFuncs. When a
+// request is received by the broker, it looks up the request type in the map
+// and invokes the found requestHandlerFunc to generate an appropriate reply.
+func (b *MockBroker) SetHandlerFuncByMap(handlerMap map[string]requestHandlerFunc) {
+	fnMap := maps.Clone(handlerMap)
+	b.setHandler(func(req *request) (res encoderWithHeader) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		return fnMap[reqTypeName](req)
+	})
+}
+
+// SetNotifier sets a function that will get invoked whenever a request has been
+// processed successfully, with the number of bytes read and written
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
+func (b *MockBroker) Addr() string {
+	return b.listener.Addr().String()
+}
+
+// Close terminates the broker blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+	close(b.expectations)
+	if len(b.expectations) > 0 {
+		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+		for e := range b.expectations {
+			_, _ = buf.WriteString(spew.Sdump(e))
+		}
+		b.t.Error(buf.String())
+	}
+	close(b.closing)
+	<-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+	b.lock.Lock()
+	b.handler = handler
+	b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+	defer close(b.stopper)
+	var err error
+	var conn net.Conn
+
+	go func() {
+		<-b.closing
+		err := b.listener.Close()
+		if err != nil {
+			b.t.Error(err)
+		}
+	}()
+
+	wg := &sync.WaitGroup{}
+	i := 0
+	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+		wg.Add(1)
+		go b.handleRequests(conn, i, wg)
+		i++
+	}
+	wg.Wait()
+	if !isConnectionClosedError(err) {
+		Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+	}
+}
+
+func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
+	b.gssApiHandler = handler
+}
+
+func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(lengthBytes)
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+	if length <= 4 || length > MaxRequestSize {
+		return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(encodedReq)
+
+	fullBytes := append(lengthBytes, encodedReq...)
+
+	return fullBytes, nil
+}
+
+func (b *MockBroker) isGSSAPI(buffer []byte) bool {
+	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
+}
+
+func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	defer func() {
+		_ = conn.Close()
+	}()
+	s := spew.NewDefaultConfig()
+	s.MaxDepth = 1
+	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+	var err error
+
+	abort := make(chan none)
+	defer close(abort)
+	go func() {
+		select {
+		case <-b.closing:
+			_ = conn.Close()
+		case <-abort:
+		}
+	}()
+
+	var bytesWritten int
+	var bytesRead int
+	for {
+		buffer, err := b.readToBytes(conn)
+		if err != nil {
+			if !isConnectionClosedError(err) {
+				Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
+				b.serverError(err)
+			}
+			break
+		}
+
+		bytesWritten = 0
+		if !b.isGSSAPI(buffer) {
+			req, br, err := decodeRequest(bytes.NewReader(buffer))
+			bytesRead = br
+			if err != nil {
+				if !isConnectionClosedError(err) {
+					Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+					b.serverError(err)
+				}
+				break
+			}
+
+			if b.latency > 0 {
+				time.Sleep(b.latency)
+			}
+
+			b.lock.Lock()
+			res := b.handler(req)
+			b.history = append(b.history, RequestResponse{req.body, res})
+			b.lock.Unlock()
+
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+				continue
+			}
+			Logger.Printf(
+				"*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
+				b.brokerID, idx, req.body, res,
+				s.Sprintf("%#v", req.body),
+				s.Sprintf("%#v", res),
+			)
+
+			encodedRes, err := encode(res, nil)
+			if err != nil {
+				b.serverError(fmt.Errorf("failed to encode %T - %w", res, err))
+				break
+			}
+			if len(encodedRes) == 0 {
+				b.lock.Lock()
+				if b.notifier != nil {
+					b.notifier(bytesRead, 0)
+				}
+				b.lock.Unlock()
+				continue
+			}
+
+			resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
+			if _, err = conn.Write(resHeader); err != nil {
+				b.serverError(err)
+				break
+			}
+			if _, err = conn.Write(encodedRes); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(resHeader) + len(encodedRes)
+		} else {
+			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
+			// History is not recorded for this kind of request as it is only used to test the GSSAPI authentication mechanism
+			b.lock.Lock()
+			res := b.gssApiHandler(buffer)
+			b.lock.Unlock()
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
+				continue
+			}
+			if _, err = conn.Write(res); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(res)
+		}
+
+		b.lock.Lock()
+		if b.notifier != nil {
+			b.notifier(bytesRead, bytesWritten)
+		}
+		b.lock.Unlock()
+	}
+	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
+	headerLength := uint32(8)
+
+	if headerVersion >= 1 {
+		headerLength = 9
+	}
+
+	resHeader := make([]byte, headerLength)
+	binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
+	binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
+
+	if headerVersion >= 1 {
+		binary.PutUvarint(resHeader[8:], 0)
+	}
+
+	return resHeader
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
+	select {
+	case res, ok := <-b.expectations:
+		if !ok {
+			return nil
+		}
+		return res
+	case <-time.After(expectationTimeout):
+		return nil
+	}
+}
+
+func isConnectionClosedError(err error) bool {
+	var result bool
+	opError := &net.OpError{}
+	if errors.As(err, &opError) {
+		result = true
+	} else if errors.Is(err, io.EOF) {
+		result = true
+	} else if err.Error() == "use of closed network connection" {
+		result = true
+	}
+
+	return result
+}
+
+func (b *MockBroker) serverError(err error) {
+	b.t.Helper()
+	if isConnectionClosedError(err) {
+		return
+	}
+	b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a broker ID to use. If an error occurs it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	var (
+		listener net.Listener
+		err      error
+	)
+
+	// retry up to 20 times if address already in use (e.g., if replacing broker which hasn't cleanly shutdown)
+	for i := 0; i < 20; i++ {
+		listener, err = net.Listen("tcp", addr)
+		if err != nil {
+			if errors.Is(err, syscall.EADDRINUSE) {
+				Logger.Printf("*** mockbroker/%d waiting for %s (address already in use)", brokerID, addr)
+				time.Sleep(time.Millisecond * 100)
+				continue
+			}
+			t.Fatal(err)
+		}
+		break
+	}
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
+	var err error
+
+	broker := &MockBroker{
+		closing:      make(chan none),
+		stopper:      make(chan none),
+		t:            t,
+		brokerID:     brokerID,
+		expectations: make(chan encoderWithHeader, 512),
+		listener:     listener,
+	}
+	broker.handler = broker.defaultRequestHandler
+
+	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp, err := strconv.ParseInt(portStr, 10, 32)
+	if err != nil {
+		t.Fatal(err)
+	}
+	broker.port = int32(tmp)
+
+	go broker.serverLoop()
+
+	return broker
+}
+
+func (b *MockBroker) Returns(e encoderWithHeader) {
+	b.expectations <- e
+}
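A typical test against MockBroker might look like the sketch below. It assumes the usual go test *testing.T and the mock response builders from mockresponses.go, including an ApiVersionsRequest mock that a client may send first depending on its configuration; treat it as illustrative rather than part of this change.

package kafka_test

import (
	"testing"

	"github.com/IBM/sarama"
)

func TestClientAgainstMockBroker(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		// A client may probe API versions first, depending on its config
		// (builder assumed to be defined later in mockresponses.go).
		"ApiVersionsRequest": sarama.NewMockApiVersionsResponse(t),
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my_topic", 0, broker.BrokerID()),
	})

	client, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	partitions, err := client.Partitions("my_topic")
	if err != nil {
		t.Fatal(err)
	}
	if len(partitions) != 1 {
		t.Fatalf("expected 1 partition, got %d", len(partitions))
	}
}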
diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/IBM/sarama/mockkerberos.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/mockkerberos.go
rename to vendor/github.com/IBM/sarama/mockkerberos.go
diff --git a/vendor/github.com/IBM/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go
new file mode 100644
index 0000000..2c35279
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/mockresponses.go
@@ -0,0 +1,1531 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Helper()
+}
+
+// MockResponse is a response builder interface; it defines one method that
+// generates a response based on a request body. MockResponses are used
+// to program the behavior of MockBroker in tests.
+type MockResponse interface {
+	For(reqBody versionedDecoder) (res encoderWithHeader)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+	res encoderWithHeader
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
+	return mw.res
+}
+
+func NewMockWrapper(res encoderWithHeader) *MockWrapper {
+	return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Every time a `MockBroker` calls its `For` method
+// the next response from the sequence is returned. When the end of the
+// sequence is reached the last element from the sequence is returned.
+type MockSequence struct {
+	responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+	ms := &MockSequence{}
+	ms.responses = make([]MockResponse, len(responses))
+	for i, res := range responses {
+		switch res := res.(type) {
+		case MockResponse:
+			ms.responses[i] = res
+		case encoderWithHeader:
+			ms.responses[i] = NewMockWrapper(res)
+		default:
+			panic(fmt.Sprintf("Unexpected response type: %T", res))
+		}
+	}
+	return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
+	res = mc.responses[0].For(reqBody)
+	if len(mc.responses) > 1 {
+		mc.responses = mc.responses[1:]
+	}
+	return res
+}
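For example, a sequence can make the first metadata lookup fail and the retry succeed (a fragment; broker and t are assumed to come from a surrounding test such as the one sketched after mockbroker.go above):

// First metadata lookup fails, the retry succeeds.
seq := sarama.NewMockSequence(
	sarama.NewMockMetadataResponse(t).
		SetError("my_topic", sarama.ErrLeaderNotAvailable),
	sarama.NewMockMetadataResponse(t).
		SetBroker(broker.Addr(), broker.BrokerID()).
		SetLeader("my_topic", 0, broker.BrokerID()),
)
broker.SetHandlerByMap(map[string]sarama.MockResponse{
	"MetadataRequest": seq,
})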
+
+type MockListGroupsResponse struct {
+	groups map[string]string
+	t      TestReporter
+}
+
+func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
+	return &MockListGroupsResponse{
+		groups: make(map[string]string),
+		t:      t,
+	}
+}
+
+func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	request := reqBody.(*ListGroupsRequest)
+	response := &ListGroupsResponse{
+		Version: request.Version,
+		Groups:  m.groups,
+	}
+	return response
+}
+
+func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
+	m.groups[groupID] = protocolType
+	return m
+}
+
+type MockDescribeGroupsResponse struct {
+	groups map[string]*GroupDescription
+	t      TestReporter
+}
+
+func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
+	return &MockDescribeGroupsResponse{
+		t:      t,
+		groups: make(map[string]*GroupDescription),
+	}
+}
+
+func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
+	m.groups[groupID] = description
+	return m
+}
+
+func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	request := reqBody.(*DescribeGroupsRequest)
+
+	response := &DescribeGroupsResponse{Version: request.version()}
+	for _, requestedGroup := range request.Groups {
+		if group, ok := m.groups[requestedGroup]; ok {
+			response.Groups = append(response.Groups, group)
+		} else {
+			// Mimic real kafka - if a group doesn't exist, return
+			// an entry with state "Dead"
+			response.Groups = append(response.Groups, &GroupDescription{
+				GroupId: requestedGroup,
+				State:   "Dead",
+			})
+		}
+	}
+
+	return response
+}
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+	controllerID int32
+	errors       map[string]KError
+	leaders      map[string]map[int32]int32
+	brokers      map[string]int32
+	t            TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+	return &MockMetadataResponse{
+		errors:  make(map[string]KError),
+		leaders: make(map[string]map[int32]int32),
+		brokers: make(map[string]int32),
+		t:       t,
+	}
+}
+
+func (mmr *MockMetadataResponse) SetError(topic string, kerror KError) *MockMetadataResponse {
+	mmr.errors[topic] = kerror
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+	partitions := mmr.leaders[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int32)
+		mmr.leaders[topic] = partitions
+	}
+	partitions[partition] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+	mmr.brokers[addr] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
+	mmr.controllerID = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	metadataRequest := reqBody.(*MetadataRequest)
+	metadataResponse := &MetadataResponse{
+		Version:      metadataRequest.version(),
+		ControllerID: mmr.controllerID,
+	}
+	for addr, brokerID := range mmr.brokers {
+		metadataResponse.AddBroker(addr, brokerID)
+	}
+
+	// Generate set of replicas
+	var replicas []int32
+	var offlineReplicas []int32
+	for _, brokerID := range mmr.brokers {
+		replicas = append(replicas, brokerID)
+	}
+
+	if len(metadataRequest.Topics) == 0 {
+		for topic, partitions := range mmr.leaders {
+			for partition, brokerID := range partitions {
+				metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+			}
+		}
+		for topic, err := range mmr.errors {
+			metadataResponse.AddTopic(topic, err)
+		}
+		return metadataResponse
+	}
+	for _, topic := range metadataRequest.Topics {
+		leaders, ok := mmr.leaders[topic]
+		if !ok {
+			if err, ok := mmr.errors[topic]; ok {
+				metadataResponse.AddTopic(topic, err)
+			} else {
+				metadataResponse.AddTopic(topic, ErrUnknownTopicOrPartition)
+			}
+			continue
+		}
+		for partition, brokerID := range leaders {
+			metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+		}
+	}
+	return metadataResponse
+}
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+	offsets map[string]map[int32]map[int64]int64
+	t       TestReporter
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+	return &MockOffsetResponse{
+		offsets: make(map[string]map[int32]map[int64]int64),
+		t:       t,
+	}
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]int64)
+		mor.offsets[topic] = partitions
+	}
+	times := partitions[partition]
+	if times == nil {
+		times = make(map[int64]int64)
+		partitions[partition] = times
+	}
+	times[time] = offset
+	return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	offsetRequest := reqBody.(*OffsetRequest)
+	offsetResponse := &OffsetResponse{Version: offsetRequest.Version}
+	for topic, partitions := range offsetRequest.blocks {
+		for partition, block := range partitions {
+			offset := mor.getOffset(topic, partition, block.timestamp)
+			offsetResponse.AddTopicPartition(topic, partition, offset)
+		}
+	}
+	return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		mor.t.Errorf("missing topic: %s", topic)
+	}
+	times := partitions[partition]
+	if times == nil {
+		mor.t.Errorf("missing partition: %d", partition)
+	}
+	offset, ok := times[time]
+	if !ok {
+		mor.t.Errorf("missing time: %d", time)
+	}
+	return offset
+}
+
+// mockMessage is a message used to populate a mock `FetchResponse`.
+type mockMessage struct {
+	key Encoder
+	msg Encoder
+}
+
+func newMockMessage(key, msg Encoder) *mockMessage {
+	return &mockMessage{
+		key: key,
+		msg: msg,
+	}
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+	messages       map[string]map[int32]map[int64]*mockMessage
+	messagesLock   *sync.RWMutex
+	highWaterMarks map[string]map[int32]int64
+	t              TestReporter
+	batchSize      int
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+	return &MockFetchResponse{
+		messages:       make(map[string]map[int32]map[int64]*mockMessage),
+		messagesLock:   &sync.RWMutex{},
+		highWaterMarks: make(map[string]map[int32]int64),
+		t:              t,
+		batchSize:      batchSize,
+	}
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+	return mfr.SetMessageWithKey(topic, partition, offset, nil, msg)
+}
+
+func (mfr *MockFetchResponse) SetMessageWithKey(topic string, partition int32, offset int64, key, msg Encoder) *MockFetchResponse {
+	mfr.messagesLock.Lock()
+	defer mfr.messagesLock.Unlock()
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]*mockMessage)
+		mfr.messages[topic] = partitions
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		messages = make(map[int64]*mockMessage)
+		partitions[partition] = messages
+	}
+	messages[offset] = newMockMessage(key, msg)
+	return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int64)
+		mfr.highWaterMarks[topic] = partitions
+	}
+	partitions[partition] = offset
+	return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	fetchRequest := reqBody.(*FetchRequest)
+	res := &FetchResponse{
+		Version: fetchRequest.Version,
+	}
+	for topic, partitions := range fetchRequest.blocks {
+		for partition, block := range partitions {
+			initialOffset := block.fetchOffset
+			offset := initialOffset
+			maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+			for i := 0; i < mfr.batchSize && offset < maxOffset; {
+				msg := mfr.getMessage(topic, partition, offset)
+				if msg != nil {
+					res.AddMessage(topic, partition, msg.key, msg.msg, offset)
+					i++
+				}
+				offset++
+			}
+			fb := res.GetBlock(topic, partition)
+			if fb == nil {
+				res.AddError(topic, partition, ErrNoError)
+				fb = res.GetBlock(topic, partition)
+			}
+			fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+		}
+	}
+	return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) *mockMessage {
+	mfr.messagesLock.RLock()
+	defer mfr.messagesLock.RUnlock()
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return nil
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return nil
+	}
+	return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+	mfr.messagesLock.RLock()
+	defer mfr.messagesLock.RUnlock()
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return 0
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return 0
+	}
+	return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		return 0
+	}
+	return partitions[partition]
+}
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+	coordinators map[string]interface{}
+	t            TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+	return &MockConsumerMetadataResponse{
+		coordinators: make(map[string]interface{}),
+		t:            t,
+	}
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = broker
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = kerror
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ConsumerMetadataRequest)
+	group := req.ConsumerGroup
+	res := &ConsumerMetadataResponse{Version: req.version()}
+	v := mr.coordinators[group]
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
+type MockFindCoordinatorResponse struct {
+	groupCoordinators map[string]interface{}
+	transCoordinators map[string]interface{}
+	t                 TestReporter
+}
+
+func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
+	return &MockFindCoordinatorResponse{
+		groupCoordinators: make(map[string]interface{}),
+		transCoordinators: make(map[string]interface{}),
+		t:                 t,
+	}
+}
+
+func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
+	switch coordinatorType {
+	case CoordinatorGroup:
+		mr.groupCoordinators[group] = broker
+	case CoordinatorTransaction:
+		mr.transCoordinators[group] = broker
+	}
+	return mr
+}
+
+func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
+	switch coordinatorType {
+	case CoordinatorGroup:
+		mr.groupCoordinators[group] = kerror
+	case CoordinatorTransaction:
+		mr.transCoordinators[group] = kerror
+	}
+	return mr
+}
+
+func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*FindCoordinatorRequest)
+	res := &FindCoordinatorResponse{Version: req.version()}
+	var v interface{}
+	switch req.CoordinatorType {
+	case CoordinatorGroup:
+		v = mr.groupCoordinators[req.CoordinatorKey]
+	case CoordinatorTransaction:
+		v = mr.transCoordinators[req.CoordinatorKey]
+	}
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+	errors map[string]map[string]map[int32]KError
+	t      TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+	return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[string]map[int32]KError)
+	}
+	topics := mr.errors[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]KError)
+		mr.errors[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		topics[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*OffsetCommitRequest)
+	group := req.ConsumerGroup
+	res := &OffsetCommitResponse{Version: req.version()}
+	for topic, partitions := range req.blocks {
+		for partition := range partitions {
+			res.AddError(topic, partition, mr.getError(group, topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+	topics := mr.errors[group]
+	if topics == nil {
+		return ErrNoError
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+	version int16
+	errors  map[string]map[int32]KError
+	t       TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+	return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
+	mr.version = version
+	return mr
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[int32]KError)
+	}
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		mr.errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ProduceRequest)
+	res := &ProduceResponse{
+		Version: req.version(),
+	}
+	if mr.version > 0 {
+		res.Version = mr.version
+	}
+	for topic, partitions := range req.records {
+		for partition := range partitions {
+			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+	error   KError
+	t       TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+	return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+	if mr.offsets == nil {
+		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	topics := mr.offsets[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+		mr.offsets[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		topics[topic] = partitions
+	}
+	partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
+	return mr
+}
+
+func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
+	mr.error = kerror
+	return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*OffsetFetchRequest)
+	group := req.ConsumerGroup
+	res := &OffsetFetchResponse{Version: req.Version}
+
+	for topic, partitions := range mr.offsets[group] {
+		for partition, block := range partitions {
+			res.AddBlock(topic, partition, block)
+		}
+	}
+
+	if res.Version >= 2 {
+		res.Err = mr.error
+	}
+	return res
+}
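+
+// Illustrative sketch (hypothetical group/topic names): the builder above is
+// normally chained when wiring up a test, e.g.
+//
+//	NewMockOffsetFetchResponse(t).
+//		SetOffset("my-group", "my-topic", 0, 123, "meta", ErrNoError).
+//		SetError(ErrNoError)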
+
+type MockCreateTopicsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
+	return &MockCreateTopicsResponse{t: t}
+}
+
+func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*CreateTopicsRequest)
+	res := &CreateTopicsResponse{
+		Version: req.Version,
+	}
+	res.TopicErrors = make(map[string]*TopicError)
+
+	for topic := range req.TopicDetails {
+		if res.Version >= 1 && strings.HasPrefix(topic, "_") {
+			msg := "insufficient permissions to create topic with reserved prefix"
+			res.TopicErrors[topic] = &TopicError{
+				Err:    ErrTopicAuthorizationFailed,
+				ErrMsg: &msg,
+			}
+			continue
+		}
+		res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
+	}
+	return res
+}
+
+type MockDeleteTopicsResponse struct {
+	t     TestReporter
+	error KError
+}
+
+func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
+	return &MockDeleteTopicsResponse{t: t}
+}
+
+func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteTopicsRequest)
+	res := &DeleteTopicsResponse{Version: req.version()}
+	res.TopicErrorCodes = make(map[string]KError)
+
+	for _, topic := range req.Topics {
+		res.TopicErrorCodes[topic] = mr.error
+	}
+	res.Version = req.Version
+	return res
+}
+
+func (mr *MockDeleteTopicsResponse) SetError(kerror KError) *MockDeleteTopicsResponse {
+	mr.error = kerror
+	return mr
+}
+
+type MockCreatePartitionsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
+	return &MockCreatePartitionsResponse{t: t}
+}
+
+func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*CreatePartitionsRequest)
+	res := &CreatePartitionsResponse{Version: req.version()}
+	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
+
+	for topic := range req.TopicPartitions {
+		if strings.HasPrefix(topic, "_") {
+			msg := "insufficient permissions to create partition on topic with reserved prefix"
+			res.TopicPartitionErrors[topic] = &TopicPartitionError{
+				Err:    ErrTopicAuthorizationFailed,
+				ErrMsg: &msg,
+			}
+			continue
+		}
+		res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
+	}
+	return res
+}
+
+type MockAlterPartitionReassignmentsResponse struct {
+	t TestReporter
+}
+
+func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
+	return &MockAlterPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterPartitionReassignmentsRequest)
+	_ = req
+	res := &AlterPartitionReassignmentsResponse{Version: req.version()}
+	return res
+}
+
+type MockListPartitionReassignmentsResponse struct {
+	t TestReporter
+}
+
+func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
+	return &MockListPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ListPartitionReassignmentsRequest)
+	_ = req
+	res := &ListPartitionReassignmentsResponse{Version: req.version()}
+
+	for topic, partitions := range req.blocks {
+		for _, partition := range partitions {
+			res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
+		}
+	}
+
+	return res
+}
+
+type MockElectLeadersResponse struct {
+	t TestReporter
+}
+
+func NewMockElectLeadersResponse(t TestReporter) *MockElectLeadersResponse {
+	return &MockElectLeadersResponse{t: t}
+}
+
+func (mr *MockElectLeadersResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ElectLeadersRequest)
+	res := &ElectLeadersResponse{Version: req.version(), ReplicaElectionResults: map[string]map[int32]*PartitionResult{}}
+
+	for topic, partitions := range req.TopicPartitions {
+		for _, partition := range partitions {
+			res.ReplicaElectionResults[topic] = map[int32]*PartitionResult{
+				partition: {ErrorCode: ErrNoError},
+			}
+		}
+	}
+	return res
+}
+
+type MockDeleteRecordsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
+	return &MockDeleteRecordsResponse{t: t}
+}
+
+func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteRecordsRequest)
+	res := &DeleteRecordsResponse{Version: req.version()}
+	res.Topics = make(map[string]*DeleteRecordsResponseTopic)
+
+	for topic, deleteRecordRequestTopic := range req.Topics {
+		partitions := make(map[int32]*DeleteRecordsResponsePartition)
+		for partition := range deleteRecordRequestTopic.PartitionOffsets {
+			partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
+		}
+		res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
+	}
+	return res
+}
+
+type MockDescribeConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
+	return &MockDescribeConfigsResponse{t: t}
+}
+
+func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeConfigsRequest)
+	res := &DescribeConfigsResponse{
+		Version: req.Version,
+	}
+
+	includeSynonyms := req.Version > 0
+	includeSource := req.Version > 0
+
+	for _, r := range req.Resources {
+		var configEntries []*ConfigEntry
+		switch r.Type {
+		case BrokerResource:
+			configEntries = append(configEntries,
+				&ConfigEntry{
+					Name:     "min.insync.replicas",
+					Value:    "2",
+					ReadOnly: false,
+					Default:  false,
+				},
+			)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		case BrokerLoggerResource:
+			configEntries = append(configEntries,
+				&ConfigEntry{
+					Name:     "kafka.controller.KafkaController",
+					Value:    "DEBUG",
+					ReadOnly: false,
+					Default:  false,
+				},
+			)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		case TopicResource:
+			maxMessageBytes := &ConfigEntry{
+				Name:      "max.message.bytes",
+				Value:     "1000000",
+				ReadOnly:  false,
+				Default:   !includeSource,
+				Sensitive: false,
+			}
+			if includeSource {
+				maxMessageBytes.Source = SourceDefault
+			}
+			if includeSynonyms {
+				maxMessageBytes.Synonyms = []*ConfigSynonym{
+					{
+						ConfigName:  "max.message.bytes",
+						ConfigValue: "500000",
+					},
+				}
+			}
+			retentionMs := &ConfigEntry{
+				Name:      "retention.ms",
+				Value:     "5000",
+				ReadOnly:  false,
+				Default:   false,
+				Sensitive: false,
+			}
+			if includeSynonyms {
+				retentionMs.Synonyms = []*ConfigSynonym{
+					{
+						ConfigName:  "log.retention.ms",
+						ConfigValue: "2500",
+					},
+				}
+			}
+			password := &ConfigEntry{
+				Name:      "password",
+				Value:     "12345",
+				ReadOnly:  false,
+				Default:   false,
+				Sensitive: true,
+			}
+			configEntries = append(
+				configEntries, maxMessageBytes, retentionMs, password)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		}
+	}
+	return res
+}
+
+type MockDescribeConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
+	return &MockDescribeConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeConfigsRequest)
+	res := &DescribeConfigsResponse{
+		Version: req.Version,
+	}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &ResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
+type MockAlterConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
+	return &MockAlterConfigsResponse{t: t}
+}
+
+func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterConfigsRequest)
+	res := &AlterConfigsResponse{Version: req.version()}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:     r.Name,
+			Type:     r.Type,
+			ErrorMsg: "",
+		})
+	}
+	return res
+}
+
+type MockAlterConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
+	return &MockAlterConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterConfigsRequest)
+	res := &AlterConfigsResponse{Version: req.version()}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
+type MockIncrementalAlterConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockIncrementalAlterConfigsResponse(t TestReporter) *MockIncrementalAlterConfigsResponse {
+	return &MockIncrementalAlterConfigsResponse{t: t}
+}
+
+func (mr *MockIncrementalAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*IncrementalAlterConfigsRequest)
+	res := &IncrementalAlterConfigsResponse{Version: req.version()}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:     r.Name,
+			Type:     r.Type,
+			ErrorMsg: "",
+		})
+	}
+	return res
+}
+
+type MockIncrementalAlterConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockIncrementalAlterConfigsResponseWithErrorCode(t TestReporter) *MockIncrementalAlterConfigsResponseWithErrorCode {
+	return &MockIncrementalAlterConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockIncrementalAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*IncrementalAlterConfigsRequest)
+	res := &IncrementalAlterConfigsResponse{Version: req.version()}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
+type MockCreateAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
+	return &MockCreateAclsResponse{t: t}
+}
+
+func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*CreateAclsRequest)
+	res := &CreateAclsResponse{Version: req.version()}
+
+	for range req.AclCreations {
+		res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
+	}
+	return res
+}
+
+type MockCreateAclsResponseError struct {
+	t TestReporter
+}
+
+func NewMockCreateAclsResponseWithError(t TestReporter) *MockCreateAclsResponseError {
+	return &MockCreateAclsResponseError{t: t}
+}
+
+func (mr *MockCreateAclsResponseError) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*CreateAclsRequest)
+	res := &CreateAclsResponse{Version: req.version()}
+
+	for range req.AclCreations {
+		res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrInvalidRequest})
+	}
+	return res
+}
+
+type MockListAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
+	return &MockListAclsResponse{t: t}
+}
+
+func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeAclsRequest)
+	res := &DescribeAclsResponse{Version: req.version()}
+	res.Err = ErrNoError
+	acl := &ResourceAcls{}
+	if req.ResourceName != nil {
+		acl.Resource.ResourceName = *req.ResourceName
+	}
+	acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
+	acl.Resource.ResourceType = req.ResourceType
+
+	host := "*"
+	if req.Host != nil {
+		host = *req.Host
+	}
+
+	principal := "User:test"
+	if req.Principal != nil {
+		principal = *req.Principal
+	}
+
+	permissionType := req.PermissionType
+	if permissionType == AclPermissionAny {
+		permissionType = AclPermissionAllow
+	}
+
+	acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
+	res.ResourceAcls = append(res.ResourceAcls, acl)
+	res.Version = int16(req.Version)
+	return res
+}
+
+type MockSaslAuthenticateResponse struct {
+	t                 TestReporter
+	kerror            KError
+	saslAuthBytes     []byte
+	sessionLifetimeMs int64
+}
+
+func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
+	return &MockSaslAuthenticateResponse{t: t}
+}
+
+func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*SaslAuthenticateRequest)
+	res := &SaslAuthenticateResponse{
+		Version:           req.version(),
+		Err:               msar.kerror,
+		SaslAuthBytes:     msar.saslAuthBytes,
+		SessionLifetimeMs: msar.sessionLifetimeMs,
+	}
+	return res
+}
+
+func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
+	msar.kerror = kerror
+	return msar
+}
+
+func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
+	msar.saslAuthBytes = saslAuthBytes
+	return msar
+}
+
+func (msar *MockSaslAuthenticateResponse) SetSessionLifetimeMs(sessionLifetimeMs int64) *MockSaslAuthenticateResponse {
+	msar.sessionLifetimeMs = sessionLifetimeMs
+	return msar
+}
+
+type MockDeleteAclsResponse struct {
+	t TestReporter
+}
+
+type MockSaslHandshakeResponse struct {
+	enabledMechanisms []string
+	kerror            KError
+	t                 TestReporter
+}
+
+func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
+	return &MockSaslHandshakeResponse{t: t}
+}
+
+func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*SaslHandshakeRequest)
+	res := &SaslHandshakeResponse{Version: req.version()}
+	res.Err = mshr.kerror
+	res.EnabledMechanisms = mshr.enabledMechanisms
+	return res
+}
+
+func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
+	mshr.kerror = kerror
+	return mshr
+}
+
+func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
+	mshr.enabledMechanisms = enabledMechanisms
+	return mshr
+}
+
+func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
+	return &MockDeleteAclsResponse{t: t}
+}
+
+func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteAclsRequest)
+	res := &DeleteAclsResponse{Version: req.version()}
+
+	for range req.Filters {
+		response := &FilterResponse{Err: ErrNoError}
+		response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
+		res.FilterResponses = append(res.FilterResponses, response)
+	}
+	res.Version = int16(req.Version)
+	return res
+}
+
+type MockDeleteGroupsResponse struct {
+	deletedGroups []string
+}
+
+func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
+	return &MockDeleteGroupsResponse{}
+}
+
+func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
+	m.deletedGroups = groups
+	return m
+}
+
+func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteGroupsRequest)
+	resp := &DeleteGroupsResponse{
+		Version:         req.version(),
+		GroupErrorCodes: map[string]KError{},
+	}
+	for _, group := range m.deletedGroups {
+		resp.GroupErrorCodes[group] = ErrNoError
+	}
+	return resp
+}
+
+type MockDeleteOffsetResponse struct {
+	errorCode      KError
+	topic          string
+	partition      int32
+	errorPartition KError
+}
+
+func NewMockDeleteOffsetRequest(t TestReporter) *MockDeleteOffsetResponse {
+	return &MockDeleteOffsetResponse{}
+}
+
+func (m *MockDeleteOffsetResponse) SetDeletedOffset(errorCode KError, topic string, partition int32, errorPartition KError) *MockDeleteOffsetResponse {
+	m.errorCode = errorCode
+	m.topic = topic
+	m.partition = partition
+	m.errorPartition = errorPartition
+	return m
+}
+
+func (m *MockDeleteOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteOffsetsRequest)
+	resp := &DeleteOffsetsResponse{
+		Version:   req.version(),
+		ErrorCode: m.errorCode,
+		Errors: map[string]map[int32]KError{
+			m.topic: {m.partition: m.errorPartition},
+		},
+	}
+	return resp
+}
+
+type MockJoinGroupResponse struct {
+	t TestReporter
+
+	ThrottleTime  int32
+	Err           KError
+	GenerationId  int32
+	GroupProtocol string
+	LeaderId      string
+	MemberId      string
+	Members       []GroupMember
+}
+
+func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
+	return &MockJoinGroupResponse{
+		t:       t,
+		Members: make([]GroupMember, 0),
+	}
+}
+
+func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*JoinGroupRequest)
+	resp := &JoinGroupResponse{
+		Version:       req.Version,
+		ThrottleTime:  m.ThrottleTime,
+		Err:           m.Err,
+		GenerationId:  m.GenerationId,
+		GroupProtocol: m.GroupProtocol,
+		LeaderId:      m.LeaderId,
+		MemberId:      m.MemberId,
+		Members:       m.Members,
+	}
+	return resp
+}
+
+func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
+	m.ThrottleTime = t
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
+	m.GenerationId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
+	m.GroupProtocol = proto
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
+	m.LeaderId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
+	m.MemberId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
+	bin, err := encode(meta, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member metadata: %v", err))
+	}
+	m.Members = append(m.Members, GroupMember{MemberId: id, Metadata: bin})
+	return m
+}
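+
+// Illustrative sketch (hypothetical member/topic names): a join-group mock is
+// usually built by chaining the setters above, e.g.
+//
+//	NewMockJoinGroupResponse(t).
+//		SetGroupProtocol("range").
+//		SetMemberId("member-1").
+//		SetMember("member-1", &ConsumerGroupMemberMetadata{Topics: []string{"my-topic"}})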
+
+type MockLeaveGroupResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
+	return &MockLeaveGroupResponse{t: t}
+}
+
+func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*LeaveGroupRequest)
+	resp := &LeaveGroupResponse{
+		Version: req.version(),
+		Err:     m.Err,
+	}
+	return resp
+}
+
+func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockSyncGroupResponse struct {
+	t TestReporter
+
+	Err              KError
+	MemberAssignment []byte
+}
+
+func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
+	return &MockSyncGroupResponse{t: t}
+}
+
+func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*SyncGroupRequest)
+	resp := &SyncGroupResponse{
+		Version:          req.version(),
+		Err:              m.Err,
+		MemberAssignment: m.MemberAssignment,
+	}
+	return resp
+}
+
+func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
+	bin, err := encode(assignment, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member assignment: %v", err))
+	}
+	m.MemberAssignment = bin
+	return m
+}
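+
+// Illustrative sketch (hypothetical assignment): SetMemberAssignment takes a
+// ConsumerGroupMemberAssignment, which it encodes into the mock response, e.g.
+//
+//	NewMockSyncGroupResponse(t).SetMemberAssignment(
+//		&ConsumerGroupMemberAssignment{
+//			Version: 0,
+//			Topics:  map[string][]int32{"my-topic": {0, 1}},
+//		})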
+
+type MockHeartbeatResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
+	return &MockHeartbeatResponse{t: t}
+}
+
+func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*HeartbeatRequest)
+	resp := &HeartbeatResponse{
+		Version: req.version(),
+		Err:     m.Err,
+	}
+	return resp
+}
+
+func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockDescribeLogDirsResponse struct {
+	t       TestReporter
+	logDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
+	return &MockDescribeLogDirsResponse{t: t}
+}
+
+func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
+	var topics []DescribeLogDirsResponseTopic
+	for topic := range topicPartitions {
+		var partitions []DescribeLogDirsResponsePartition
+		for i := 0; i < topicPartitions[topic]; i++ {
+			partitions = append(partitions, DescribeLogDirsResponsePartition{
+				PartitionID: int32(i),
+				IsTemporary: false,
+				OffsetLag:   int64(0),
+				Size:        int64(1234),
+			})
+		}
+		topics = append(topics, DescribeLogDirsResponseTopic{
+			Topic:      topic,
+			Partitions: partitions,
+		})
+	}
+	logDir := DescribeLogDirsResponseDirMetadata{
+		ErrorCode: ErrNoError,
+		Path:      logDirPath,
+		Topics:    topics,
+	}
+	m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
+	return m
+}
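+
+// Illustrative sketch (hypothetical path and counts): SetLogDirs builds a
+// single log-dir entry with the given number of partitions per topic, e.g.
+//
+//	NewMockDescribeLogDirsResponse(t).
+//		SetLogDirs("/var/lib/kafka/data", map[string]int{"my-topic": 2})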
+
+func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeLogDirsRequest)
+	resp := &DescribeLogDirsResponse{
+		Version: req.version(),
+		LogDirs: m.logDirs,
+	}
+	return resp
+}
+
+type MockApiVersionsResponse struct {
+	t       TestReporter
+	apiKeys []ApiVersionsResponseKey
+}
+
+func NewMockApiVersionsResponse(t TestReporter) *MockApiVersionsResponse {
+	return &MockApiVersionsResponse{
+		t: t,
+		apiKeys: []ApiVersionsResponseKey{
+			{
+				ApiKey:     0,
+				MinVersion: 5,
+				MaxVersion: 8,
+			},
+			{
+				ApiKey:     1,
+				MinVersion: 7,
+				MaxVersion: 11,
+			},
+		},
+	}
+}
+
+func (m *MockApiVersionsResponse) SetApiKeys(apiKeys []ApiVersionsResponseKey) *MockApiVersionsResponse {
+	m.apiKeys = apiKeys
+	return m
+}
+
+func (m *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ApiVersionsRequest)
+	res := &ApiVersionsResponse{
+		Version: req.Version,
+		ApiKeys: m.apiKeys,
+	}
+	return res
+}
+
+// MockInitProducerIDResponse is an `InitProducerIDResponse` builder.
+type MockInitProducerIDResponse struct {
+	producerID    int64
+	producerEpoch int16
+	err           KError
+	t             TestReporter
+}
+
+func NewMockInitProducerIDResponse(t TestReporter) *MockInitProducerIDResponse {
+	return &MockInitProducerIDResponse{
+		t: t,
+	}
+}
+
+func (m *MockInitProducerIDResponse) SetProducerID(id int) *MockInitProducerIDResponse {
+	m.producerID = int64(id)
+	return m
+}
+
+func (m *MockInitProducerIDResponse) SetProducerEpoch(epoch int) *MockInitProducerIDResponse {
+	m.producerEpoch = int16(epoch)
+	return m
+}
+
+func (m *MockInitProducerIDResponse) SetError(err KError) *MockInitProducerIDResponse {
+	m.err = err
+	return m
+}
+
+func (m *MockInitProducerIDResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*InitProducerIDRequest)
+	res := &InitProducerIDResponse{
+		Version:       req.Version,
+		Err:           m.err,
+		ProducerID:    m.producerID,
+		ProducerEpoch: m.producerEpoch,
+	}
+	return res
+}
diff --git a/vendor/github.com/IBM/sarama/offset_commit_request.go b/vendor/github.com/IBM/sarama/offset_commit_request.go
new file mode 100644
index 0000000..2c5c693
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_commit_request.go
@@ -0,0 +1,257 @@
+package sarama
+
+import "errors"
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used when request version 1 is used, which requires kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
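+
+// Illustrative sketch (hypothetical names and offsets): a standalone consumer
+// that does not rely on Kafka group management might commit an offset roughly
+// like this, letting the broker stamp the commit time:
+//
+//	req := &OffsetCommitRequest{
+//		Version:                 1,
+//		ConsumerGroup:           "my-group",
+//		ConsumerGroupGeneration: GroupGenerationUndefined,
+//	}
+//	req.AddBlock("my-topic", 0, 42, ReceiveTime, "")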
+
+type offsetCommitRequestBlock struct {
+	offset               int64
+	timestamp            int64
+	committedLeaderEpoch int32
+	metadata             string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+	pe.putInt64(b.offset)
+	if version == 1 {
+		pe.putInt64(b.timestamp)
+	} else if b.timestamp != 0 {
+		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+	}
+	if version >= 6 {
+		pe.putInt32(b.committedLeaderEpoch)
+	}
+
+	return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	if b.offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if version == 1 {
+		if b.timestamp, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+	if version >= 6 {
+		if b.committedLeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	b.metadata, err = pd.getString()
+	return err
+}
+
+type OffsetCommitRequest struct {
+	ConsumerGroup           string
+	ConsumerGroupGeneration int32   // v1 or later
+	ConsumerID              string  // v1 or later
+	GroupInstanceId         *string // v7 or later
+	RetentionTime           int64   // v2 or later
+
+	// Version can be:
+	// - 0 (kafka 0.8.1 and later)
+	// - 1 (kafka 0.8.2 and later)
+	// - 2 (kafka 0.9.0 and later)
+	// - 3 (kafka 0.11.0 and later)
+	// - 4 (kafka 2.0.0 and later)
+	// - 5&6 (kafka 2.1.0 and later)
+	// - 7 (kafka 2.3.0 and later)
+	Version int16
+	blocks  map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+	if r.Version < 0 || r.Version > 7 {
+		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+	}
+
+	if err := pe.putString(r.ConsumerGroup); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ConsumerGroupGeneration)
+		if err := pe.putString(r.ConsumerID); err != nil {
+			return err
+		}
+	} else {
+		if r.ConsumerGroupGeneration != 0 {
+			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+		}
+		if r.ConsumerID != "" {
+			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+		}
+	}
+
+	// Version 5 removes RetentionTime, which is now controlled only by a broker configuration.
+	if r.Version >= 2 && r.Version <= 4 {
+		pe.putInt64(r.RetentionTime)
+	} else if r.RetentionTime != 0 {
+		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest outside versions 2-4, it will be ignored")
+	}
+
+	if r.Version >= 7 {
+		if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ConsumerGroup, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.ConsumerID, err = pd.getString(); err != nil {
+			return err
+		}
+	}
+
+	// Version 5 removes RetentionTime, which is now controlled only by a broker configuration.
+	if r.Version >= 2 && r.Version <= 4 {
+		if r.RetentionTime, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 7 {
+		if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetCommitRequestBlock{}
+			if err := block.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+	return apiKeyOffsetCommit
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *OffsetCommitRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_3_0_0
+	case 5, 6:
+		return V2_1_0_0
+	case 4:
+		return V2_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_9_0_0
+	case 0, 1:
+		return V0_8_2_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+	r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata)
+}
+
+func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+	}
+
+	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, leaderEpoch, metadata}
+}
+
+func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
+	partitions := r.blocks[topic]
+	if partitions == nil {
+		return 0, "", errors.New("no such offset")
+	}
+	block := partitions[partitionID]
+	if block == nil {
+		return 0, "", errors.New("no such offset")
+	}
+	return block.offset, block.metadata, nil
+}
diff --git a/vendor/github.com/IBM/sarama/offset_commit_response.go b/vendor/github.com/IBM/sarama/offset_commit_response.go
new file mode 100644
index 0000000..2af478c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_commit_response.go
@@ -0,0 +1,131 @@
+package sarama
+
+import "time"
+
+type OffsetCommitResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Errors         map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]KError)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		r.Errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, kerror := range partitions {
+			pe.putInt32(partition)
+			pe.putKError(kerror)
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil || numTopics == 0 {
+		return err
+	}
+
+	r.Errors = make(map[string]map[int32]KError, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numErrors, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Errors[name] = make(map[int32]KError, numErrors)
+
+		for j := 0; j < numErrors; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			r.Errors[name][id], err = pd.getKError()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+	return apiKeyOffsetCommit
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *OffsetCommitResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_3_0_0
+	case 5, 6:
+		return V2_1_0_0
+	case 4:
+		return V2_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_9_0_0
+	case 0, 1:
+		return V0_8_2_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *OffsetCommitResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/offset_fetch_request.go b/vendor/github.com/IBM/sarama/offset_fetch_request.go
new file mode 100644
index 0000000..0cfbe7f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_fetch_request.go
@@ -0,0 +1,206 @@
+package sarama
+
+type OffsetFetchRequest struct {
+	Version       int16
+	ConsumerGroup string
+	RequireStable bool // requires v7+
+	partitions    map[string][]int32
+}
+
+func (r *OffsetFetchRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func NewOffsetFetchRequest(
+	version KafkaVersion,
+	group string,
+	partitions map[string][]int32,
+) *OffsetFetchRequest {
+	request := &OffsetFetchRequest{
+		ConsumerGroup: group,
+		partitions:    partitions,
+	}
+	if version.IsAtLeast(V2_5_0_0) {
+		// Version 7 adds the RequireStable flag.
+		request.Version = 7
+	} else if version.IsAtLeast(V2_4_0_0) {
+		// Version 6 is the first flexible version.
+		request.Version = 6
+	} else if version.IsAtLeast(V2_1_0_0) {
+		// Versions 3, 4, and 5 are the same as version 2.
+		request.Version = 5
+	} else if version.IsAtLeast(V2_0_0_0) {
+		request.Version = 4
+	} else if version.IsAtLeast(V0_11_0_0) {
+		request.Version = 3
+	} else if version.IsAtLeast(V0_10_2_0) {
+		// Starting in version 2, the request can contain a null topics array to indicate that offsets
+		// for all topics should be fetched. It also returns a top level error code
+		// for group or coordinator level errors.
+		request.Version = 2
+	} else if version.IsAtLeast(V0_8_2_0) {
+		// In version 0, the request read offsets from ZK.
+		//
+		// Starting in version 1, the broker supports fetching offsets from the internal __consumer_offsets topic.
+		request.Version = 1
+	}
+
+	return request
+}
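+
+// Illustrative sketch (hypothetical group and partitions): callers normally
+// let the configured KafkaVersion choose the request version, e.g.
+//
+//	req := NewOffsetFetchRequest(V2_5_0_0, "my-group",
+//		map[string][]int32{"my-topic": {0, 1}})
+//	req.RequireStable = true // only honoured by v7+ requests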
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+	if r.Version < 0 || r.Version > 7 {
+		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+	}
+
+	err = pe.putString(r.ConsumerGroup)
+	if err != nil {
+		return err
+	}
+
+	if r.partitions == nil && r.Version >= 2 {
+		if err := pe.putArrayLength(-1); err != nil {
+			return err
+		}
+	} else {
+		if err = pe.putArrayLength(len(r.partitions)); err != nil {
+			return err
+		}
+	}
+
+	for topic, partitions := range r.partitions {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putInt32Array(partitions)
+		if err != nil {
+			return err
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	if r.RequireStable && r.Version < 7 {
+		return PacketEncodingError{"requireStable is not supported. use version 7 or later"}
+	}
+
+	if r.Version >= 7 {
+		pe.putBool(r.RequireStable)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	r.ConsumerGroup, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	partitionCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+		return nil
+	}
+
+	r.partitions = make(map[string][]int32, partitionCount)
+	for i := 0; i < partitionCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		_, err = pd.getEmptyTaggedFieldArray()
+		if err != nil {
+			return err
+		}
+
+		r.partitions[topic] = partitions
+	}
+
+	if r.Version >= 7 {
+		r.RequireStable, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+	return apiKeyOffsetFetch
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchRequest) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 2
+	}
+
+	return 1
+}
+
+func (r *OffsetFetchRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetFetchRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *OffsetFetchRequest) isFlexibleVersion(version int16) bool {
+	return version >= 6
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_5_0_0
+	case 6:
+		return V2_4_0_0
+	case 5:
+		return V2_1_0_0
+	case 4:
+		return V2_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_10_2_0
+	case 1:
+		return V0_8_2_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_5_0_0
+	}
+}
+
+func (r *OffsetFetchRequest) ZeroPartitions() {
+	if r.partitions == nil && r.Version >= 2 {
+		r.partitions = make(map[string][]int32)
+	}
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+	if r.partitions == nil {
+		r.partitions = make(map[string][]int32)
+	}
+
+	r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/vendor/github.com/IBM/sarama/offset_fetch_response.go b/vendor/github.com/IBM/sarama/offset_fetch_response.go
new file mode 100644
index 0000000..0c27563
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_fetch_response.go
@@ -0,0 +1,244 @@
+package sarama
+
+import "time"
+
+type OffsetFetchResponseBlock struct {
+	Offset      int64
+	LeaderEpoch int32
+	Metadata    string
+	Err         KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 5 {
+		b.LeaderEpoch, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	} else {
+		b.LeaderEpoch = -1
+	}
+
+	b.Metadata, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	b.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt64(b.Offset)
+
+	if version >= 5 {
+		pe.putInt32(b.LeaderEpoch)
+	}
+	err = pe.putString(b.Metadata)
+	if err != nil {
+		return err
+	}
+
+	pe.putKError(b.Err)
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+type OffsetFetchResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Blocks         map[string]map[int32]*OffsetFetchResponseBlock
+	Err            KError
+}
+
+func (r *OffsetFetchResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+	err = pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+	if r.Version >= 2 {
+		pe.putKError(r.Err)
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numTopics > 0 {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+		for i := 0; i < numTopics; i++ {
+			name, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			numBlocks, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Blocks[name] = nil
+			if numBlocks > 0 {
+				r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+			}
+			for j := 0; j < numBlocks; j++ {
+				id, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+
+				block := new(OffsetFetchResponseBlock)
+				err = block.decode(pd, version)
+				if err != nil {
+					return err
+				}
+
+				r.Blocks[name][id] = block
+			}
+
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if version >= 2 {
+		r.Err, err = pd.getKError()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+	return apiKeyOffsetFetch
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchResponse) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 1
+	}
+
+	return 0
+}
+
+func (r *OffsetFetchResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetFetchResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *OffsetFetchResponse) isFlexibleVersion(version int16) bool {
+	return version >= 6
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_5_0_0
+	case 6:
+		return V2_4_0_0
+	case 5:
+		return V2_1_0_0
+	case 4:
+		return V2_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_10_2_0
+	case 1:
+		return V0_8_2_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_5_0_0
+	}
+}
+
+func (r *OffsetFetchResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	partitions := r.Blocks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	partitions[partition] = block
+}
diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go
new file mode 100644
index 0000000..0b594c2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_manager.go
@@ -0,0 +1,663 @@
+package sarama
+
+import (
+	"sync"
+	"time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+	// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+	// It will return an error if this OffsetManager is already managing the given
+	// topic/partition.
+	ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+	// Close stops the OffsetManager from managing offsets. It is required to call
+	// this function before an OffsetManager object passes out of scope, as it
+	// will otherwise leak memory. You must call this after all the
+	// PartitionOffsetManagers are closed.
+	Close() error
+
+	// Commit commits the offsets. This method can be used if AutoCommit.Enable is
+	// set to false.
+	Commit()
+}
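+
+// Illustrative sketch (error handling elided; `client` is an assumed
+// already-constructed Client): typical OffsetManager usage looks roughly like:
+//
+//	om, _ := NewOffsetManagerFromClient("my-group", client)
+//	pom, _ := om.ManagePartition("my-topic", 0)
+//	offset, metadata := pom.NextOffset()
+//	// ... consume from offset, then mark progress:
+//	pom.MarkOffset(offset+1, metadata)
+//	_ = pom.Close()
+//	_ = om.Close()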
+
+type offsetManager struct {
+	client          Client
+	conf            *Config
+	group           string
+	ticker          *time.Ticker
+	sessionCanceler func()
+
+	memberID        string
+	groupInstanceId *string
+	generation      int32
+
+	broker     *Broker
+	brokerLock sync.RWMutex
+
+	poms     map[string]map[int32]*partitionOffsetManager
+	pomsLock sync.RWMutex
+
+	closeOnce sync.Once
+	closing   chan none
+	closed    chan none
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the partition manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+	return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client, nil)
+}
+
+func newOffsetManagerFromClient(group, memberID string, generation int32, client Client, sessionCanceler func()) (*offsetManager, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	conf := client.Config()
+	om := &offsetManager{
+		client:          client,
+		conf:            conf,
+		group:           group,
+		poms:            make(map[string]map[int32]*partitionOffsetManager),
+		sessionCanceler: sessionCanceler,
+
+		memberID:   memberID,
+		generation: generation,
+
+		closing: make(chan none),
+		closed:  make(chan none),
+	}
+	if conf.Consumer.Group.InstanceId != "" {
+		om.groupInstanceId = &conf.Consumer.Group.InstanceId
+	}
+	if conf.Consumer.Offsets.AutoCommit.Enable {
+		om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
+		go withRecover(om.mainLoop)
+	}
+
+	return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+	pom, err := om.newPartitionOffsetManager(topic, partition)
+	if err != nil {
+		return nil, err
+	}
+
+	om.pomsLock.Lock()
+	defer om.pomsLock.Unlock()
+
+	topicManagers := om.poms[topic]
+	if topicManagers == nil {
+		topicManagers = make(map[int32]*partitionOffsetManager)
+		om.poms[topic] = topicManagers
+	}
+
+	if topicManagers[partition] != nil {
+		return nil, ConfigurationError("That topic/partition is already being managed")
+	}
+
+	topicManagers[partition] = pom
+	return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+	om.closeOnce.Do(func() {
+		// exit the mainLoop
+		close(om.closing)
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			<-om.closed
+		}
+
+		// mark all POMs as closed
+		om.asyncClosePOMs()
+
+		// flush one last time
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
+				om.flushToBroker()
+				if om.releasePOMs(false) == 0 {
+					break
+				}
+			}
+		}
+
+		om.releasePOMs(true)
+		om.brokerLock.Lock()
+		om.broker = nil
+		om.brokerLock.Unlock()
+	})
+	return nil
+}
+
+func (om *offsetManager) computeBackoff(retries int) time.Duration {
+	if om.conf.Metadata.Retry.BackoffFunc != nil {
+		return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
+	} else {
+		return om.conf.Metadata.Retry.Backoff
+	}
+}
+
+func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, int32, string, error) {
+	broker, err := om.coordinator()
+	if err != nil {
+		if retries <= 0 {
+			return 0, 0, "", err
+		}
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	}
+
+	partitions := map[string][]int32{topic: {partition}}
+	req := NewOffsetFetchRequest(om.conf.Version, om.group, partitions)
+	resp, err := broker.FetchOffset(req)
+	if err != nil {
+		if retries <= 0 {
+			return 0, 0, "", err
+		}
+		om.releaseCoordinator(broker)
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	}
+
+	block := resp.GetBlock(topic, partition)
+	if block == nil {
+		return 0, 0, "", ErrIncompleteResponse
+	}
+
+	switch block.Err {
+	case ErrNoError:
+		return block.Offset, block.LeaderEpoch, block.Metadata, nil
+	case ErrNotCoordinatorForConsumer:
+		if retries <= 0 {
+			return 0, 0, "", block.Err
+		}
+		om.releaseCoordinator(broker)
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	case ErrOffsetsLoadInProgress:
+		if retries <= 0 {
+			return 0, 0, "", block.Err
+		}
+		backoff := om.computeBackoff(retries)
+		select {
+		case <-om.closing:
+			return 0, 0, "", block.Err
+		case <-time.After(backoff):
+		}
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	default:
+		return 0, 0, "", block.Err
+	}
+}
+
+func (om *offsetManager) coordinator() (*Broker, error) {
+	om.brokerLock.RLock()
+	broker := om.broker
+	om.brokerLock.RUnlock()
+
+	if broker != nil {
+		return broker, nil
+	}
+
+	om.brokerLock.Lock()
+	defer om.brokerLock.Unlock()
+
+	if broker := om.broker; broker != nil {
+		return broker, nil
+	}
+
+	if err := om.client.RefreshCoordinator(om.group); err != nil {
+		return nil, err
+	}
+
+	broker, err := om.client.Coordinator(om.group)
+	if err != nil {
+		return nil, err
+	}
+
+	om.broker = broker
+	return broker, nil
+}
+
+func (om *offsetManager) releaseCoordinator(b *Broker) {
+	om.brokerLock.Lock()
+	if om.broker == b {
+		om.broker = nil
+	}
+	om.brokerLock.Unlock()
+}
+
+func (om *offsetManager) mainLoop() {
+	defer om.ticker.Stop()
+	defer close(om.closed)
+
+	for {
+		select {
+		case <-om.ticker.C:
+			om.Commit()
+		case <-om.closing:
+			return
+		}
+	}
+}
+
+func (om *offsetManager) Commit() {
+	om.flushToBroker()
+	om.releasePOMs(false)
+}
+
+func (om *offsetManager) flushToBroker() {
+	broker, err := om.coordinator()
+	if err != nil {
+		om.handleError(err)
+		return
+	}
+
+	// Care needs to be taken to unlock this. Don't want to defer the unlock as this would
+	// cause the lock to be held while waiting for the broker to reply.
+	broker.lock.Lock()
+	req := om.constructRequest()
+	if req == nil {
+		broker.lock.Unlock()
+		return
+	}
+	resp, rp, err := sendOffsetCommit(broker, req)
+	broker.lock.Unlock()
+
+	if err != nil {
+		om.handleError(err)
+		om.releaseCoordinator(broker)
+		_ = broker.Close()
+		return
+	}
+
+	err = handleResponsePromise(req, resp, rp, nil)
+	if err != nil {
+		om.handleError(err)
+		om.releaseCoordinator(broker)
+		_ = broker.Close()
+		return
+	}
+
+	broker.handleThrottledResponse(resp)
+	om.handleResponse(broker, req, resp)
+}
+
+func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) {
+	resp := new(OffsetCommitResponse)
+
+	promise, err := coordinator.send(req, resp)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return resp, promise, nil
+}
+
+func (om *offsetManager) constructRequest() *OffsetCommitRequest {
+	r := &OffsetCommitRequest{
+		Version:                 1,
+		ConsumerGroup:           om.group,
+		ConsumerID:              om.memberID,
+		ConsumerGroupGeneration: om.generation,
+	}
+	// Version 1 adds group membership information and the per-partition commit timestamp.
+	//
+	// Version 2 adds retention time.  It removes the commit timestamp added in version 1.
+	if om.conf.Version.IsAtLeast(V0_9_0_0) {
+		r.Version = 2
+	}
+	// Versions 3 and 4 are the same as version 2.
+	if om.conf.Version.IsAtLeast(V0_11_0_0) {
+		r.Version = 3
+	}
+	if om.conf.Version.IsAtLeast(V2_0_0_0) {
+		r.Version = 4
+	}
+	// Version 5 removes the retention time, which is now controlled only by a broker configuration.
+	//
+	// Version 6 adds the leader epoch for fencing.
+	if om.conf.Version.IsAtLeast(V2_1_0_0) {
+		r.Version = 6
+	}
+	// version 7 adds a new field called groupInstanceId to indicate member identity across restarts.
+	if om.conf.Version.IsAtLeast(V2_3_0_0) {
+		r.Version = 7
+		r.GroupInstanceId = om.groupInstanceId
+	}
+
+	// commit timestamp was only briefly supported in V1 where we set it to
+	// ReceiveTime (-1) to tell the broker to set it to the time when the commit
+	// request was received
+	var commitTimestamp int64
+	if r.Version == 1 {
+		commitTimestamp = ReceiveTime
+	}
+
+	// request controlled retention was only supported from V2-V4 (it became
+	// broker-only after that) so if the user has set the config options then
+	// flow those through as retention time on the commit request.
+	if r.Version >= 2 && r.Version < 5 {
+		// Map Sarama's default of 0 to Kafka's default of -1
+		r.RetentionTime = -1
+		if om.conf.Consumer.Offsets.Retention > 0 {
+			r.RetentionTime = int64(om.conf.Consumer.Offsets.Retention / time.Millisecond)
+		}
+	}
+
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.lock.Lock()
+			if pom.dirty {
+				r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, commitTimestamp, pom.metadata)
+			}
+			pom.lock.Unlock()
+		}
+	}
+
+	if len(r.blocks) > 0 {
+		return r
+	}
+
+	return nil
+}
+
+func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
+				continue
+			}
+
+			var err KError
+			var ok bool
+
+			if resp.Errors[pom.topic] == nil {
+				pom.handleError(ErrIncompleteResponse)
+				continue
+			}
+			if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
+				pom.handleError(ErrIncompleteResponse)
+				continue
+			}
+
+			switch err {
+			case ErrNoError:
+				block := req.blocks[pom.topic][pom.partition]
+				pom.updateCommitted(block.offset, block.metadata)
+			case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+				ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+				// not a critical error, we just need to redispatch
+				om.releaseCoordinator(broker)
+			case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+				// nothing we can do about this, just tell the user and carry on
+				pom.handleError(err)
+			case ErrOffsetsLoadInProgress:
+				// nothing wrong but we didn't commit, we'll get it next time round
+			case ErrFencedInstancedId:
+				pom.handleError(err)
+				// TODO close the whole consumer for instance fenced....
+				om.tryCancelSession()
+			case ErrUnknownTopicOrPartition:
+				// let the user know *and* try redispatching - if topic-auto-create is
+				// enabled, redispatching should trigger a metadata req and create the
+				// topic; if not then re-dispatching won't help, but we've let the user
+				// know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706)
+				fallthrough
+			default:
+				// dunno, tell the user and try redispatching
+				pom.handleError(err)
+				om.releaseCoordinator(broker)
+			}
+		}
+	}
+}
+
+func (om *offsetManager) handleError(err error) {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.handleError(err)
+		}
+	}
+}
+
+func (om *offsetManager) asyncClosePOMs() {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.AsyncClose()
+		}
+	}
+}
+
+// Releases/removes closed POMs once they are clean (or when forced)
+func (om *offsetManager) releasePOMs(force bool) (remaining int) {
+	om.pomsLock.Lock()
+	defer om.pomsLock.Unlock()
+
+	for topic, topicManagers := range om.poms {
+		for partition, pom := range topicManagers {
+			pom.lock.Lock()
+			releaseDue := pom.done && (force || !pom.dirty)
+			pom.lock.Unlock()
+
+			if releaseDue {
+				pom.release()
+
+				delete(om.poms[topic], partition)
+				if len(om.poms[topic]) == 0 {
+					delete(om.poms, topic)
+				}
+			}
+		}
+		remaining += len(om.poms[topic])
+	}
+	return
+}
+
+func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	if partitions, ok := om.poms[topic]; ok {
+		if pom, ok := partitions[partition]; ok {
+			return pom
+		}
+	}
+	return nil
+}
+
+func (om *offsetManager) tryCancelSession() {
+	if om.sessionCanceler != nil {
+		om.sessionCanceler()
+	}
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+	// NextOffset returns the next offset that should be consumed for the managed
+	// partition, accompanied by metadata which can be used to reconstruct the state
+	// of the partition consumer when it resumes. NextOffset() will return
+	// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+	// was committed for this partition yet.
+	NextOffset() (int64, string)
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(offset int64, metadata string)
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. ResetOffset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(offset int64, metadata string)
+
+	// Errors returns a read channel of errors that occur during offset management, if
+	// enabled. By default, errors are logged and not returned over this channel. If
+	// you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+	// return immediately, after which you should wait until the 'errors' channel has
+	// been drained and closed. It is required to call this function (or Close) before
+	// a consumer object passes out of scope, as it will otherwise leak memory. You
+	// must call this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionOffsetManager from managing offsets. It is required to
+	// call this function (or AsyncClose) before a PartitionOffsetManager object
+	// passes out of scope, as it will otherwise leak memory. You must call this
+	// before calling Close on the underlying client.
+	Close() error
+}
+
+type partitionOffsetManager struct {
+	parent      *offsetManager
+	topic       string
+	partition   int32
+	leaderEpoch int32
+
+	lock     sync.Mutex
+	offset   int64
+	metadata string
+	dirty    bool
+	done     bool
+
+	releaseOnce sync.Once
+	errors      chan *ConsumerError
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+	offset, leaderEpoch, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
+	if err != nil {
+		return nil, err
+	}
+
+	return &partitionOffsetManager{
+		parent:      om,
+		topic:       topic,
+		partition:   partition,
+		leaderEpoch: leaderEpoch,
+		errors:      make(chan *ConsumerError, om.conf.ChannelBufferSize),
+		offset:      offset,
+		metadata:    metadata,
+	}, nil
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+	return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset > pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset <= pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset == offset && pom.metadata == metadata {
+		pom.dirty = false
+	}
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset >= 0 {
+		return pom.offset, pom.metadata
+	}
+
+	return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+	pom.lock.Lock()
+	pom.done = true
+	pom.lock.Unlock()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+	pom.AsyncClose()
+
+	var errors ConsumerErrors
+	for err := range pom.errors {
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+	cErr := &ConsumerError{
+		Topic:     pom.topic,
+		Partition: pom.partition,
+		Err:       err,
+	}
+
+	if pom.parent.conf.Consumer.Return.Errors {
+		pom.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (pom *partitionOffsetManager) release() {
+	pom.releaseOnce.Do(func() {
+		close(pom.errors)
+	})
+}
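In application code the offset manager above is normally obtained from a Client rather than constructed directly. A minimal usage sketch, assuming a local broker at localhost:9092 and placeholder group/topic names:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // enables the newer OffsetCommit versions selected above

	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close() // the offset manager and its POMs must be closed before the client

	pom, err := om.ManagePartition("my-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.Close()

	offset, _ := pom.NextOffset()
	// ... consume starting at `offset`; after processing a message at offset m,
	// mark m+1 per the MarkOffset contract.
	pom.MarkOffset(offset+1, "")
}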
diff --git a/vendor/github.com/IBM/sarama/offset_request.go b/vendor/github.com/IBM/sarama/offset_request.go
new file mode 100644
index 0000000..01fbb33
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_request.go
@@ -0,0 +1,236 @@
+package sarama
+
+type offsetRequestBlock struct {
+	// currentLeaderEpoch contains the current leader epoch (used in version 4+).
+	currentLeaderEpoch int32
+	// timestamp contains the current timestamp.
+	timestamp int64
+	// maxNumOffsets contains the maximum number of offsets to report.
+	maxNumOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+	if version >= 4 {
+		pe.putInt32(b.currentLeaderEpoch)
+	}
+
+	pe.putInt64(b.timestamp)
+
+	if version == 0 {
+		pe.putInt32(b.maxNumOffsets)
+	}
+
+	return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.currentLeaderEpoch = -1
+	if version >= 4 {
+		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if b.timestamp, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if version == 0 {
+		if b.maxNumOffsets, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type OffsetRequest struct {
+	Version        int16
+	IsolationLevel IsolationLevel
+	replicaID      int32
+	isReplicaIDSet bool
+	blocks         map[string]map[int32]*offsetRequestBlock
+}
+
+func NewOffsetRequest(version KafkaVersion) *OffsetRequest {
+	request := &OffsetRequest{}
+	if version.IsAtLeast(V2_2_0_0) {
+		// Version 5 adds a new error code, OFFSET_NOT_AVAILABLE.
+		request.Version = 5
+	} else if version.IsAtLeast(V2_1_0_0) {
+		// Version 4 adds the current leader epoch, which is used for fencing.
+		request.Version = 4
+	} else if version.IsAtLeast(V2_0_0_0) {
+		// Version 3 is the same as version 2.
+		request.Version = 3
+	} else if version.IsAtLeast(V0_11_0_0) {
+		// Version 2 adds the isolation level, which is used for transactional reads.
+		request.Version = 2
+	} else if version.IsAtLeast(V0_10_1_0) {
+		// Version 1 removes MaxNumOffsets.  From this version forward, only a single
+		// offset can be returned.
+		request.Version = 1
+	}
+	return request
+}
+
+func (r *OffsetRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+	if r.isReplicaIDSet {
+		pe.putInt32(r.replicaID)
+	} else {
+		// default replica ID is always -1 for clients
+		pe.putInt32(-1)
+	}
+
+	if r.Version >= 2 {
+		pe.putBool(r.IsolationLevel == ReadCommitted)
+	}
+
+	err := pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	replicaID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if replicaID >= 0 {
+		r.SetReplicaID(replicaID)
+	}
+
+	if r.Version >= 2 {
+		tmp, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		r.IsolationLevel = ReadUncommitted
+		if tmp {
+			r.IsolationLevel = ReadCommitted
+		}
+	}
+
+	blockCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if blockCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	for i := 0; i < blockCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetRequestBlock{}
+			if err := block.decode(pd, version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+	return apiKeyListOffsets
+}
+
+func (r *OffsetRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *OffsetRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V2_2_0_0
+	case 4:
+		return V2_1_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *OffsetRequest) SetReplicaID(id int32) {
+	r.replicaID = id
+	r.isReplicaIDSet = true
+}
+
+func (r *OffsetRequest) ReplicaID() int32 {
+	if r.isReplicaIDSet {
+		return r.replicaID
+	}
+	return -1
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, timestamp int64, maxOffsets int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+	}
+
+	tmp := new(offsetRequestBlock)
+	tmp.currentLeaderEpoch = -1
+	tmp.timestamp = timestamp
+	if r.Version == 0 {
+		tmp.maxNumOffsets = maxOffsets
+	}
+
+	r.blocks[topic][partitionID] = tmp
+}
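A short sketch of how the constructor above maps a configured KafkaVersion to a request version; the topic and partition are placeholders:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

// newestOffsetRequest builds a ListOffsets request for a placeholder topic/partition.
// Illustration only; applications usually call Client.GetOffset instead.
func newestOffsetRequest() *sarama.OffsetRequest {
	req := sarama.NewOffsetRequest(sarama.V2_1_0_0) // picks version 4 per the mapping above
	req.AddBlock("my_topic", 0, sarama.OffsetNewest, 1)
	return req
}

func main() {
	fmt.Println("request version:", newestOffsetRequest().Version)
}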
diff --git a/vendor/github.com/IBM/sarama/offset_response.go b/vendor/github.com/IBM/sarama/offset_response.go
new file mode 100644
index 0000000..d57c24f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_response.go
@@ -0,0 +1,230 @@
+package sarama
+
+import "time"
+
+type OffsetResponseBlock struct {
+	Err KError
+	// Offsets contains the result offsets (for V0/V1 compatibility)
+	Offsets []int64 // Version 0
+	// Timestamp contains the timestamp associated with the returned offset.
+	Timestamp int64 // Version 1
+	// Offset contains the returned offset.
+	Offset int64 // Version 1
+	// LeaderEpoch contains the current leader epoch of the partition.
+	LeaderEpoch int32
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if version == 0 {
+		b.Offsets, err = pd.getInt64Array()
+		return err
+	}
+
+	if version >= 1 {
+		b.Timestamp, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		b.Offset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		// For backwards compatibility put the offset in the offsets array too
+		b.Offsets = []int64{b.Offset}
+	}
+
+	if version >= 4 {
+		if b.LeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putKError(b.Err)
+
+	if version == 0 {
+		return pe.putInt64Array(b.Offsets)
+	}
+
+	if version >= 1 {
+		pe.putInt64(b.Timestamp)
+		pe.putInt64(b.Offset)
+	}
+
+	if version >= 4 {
+		pe.putInt32(b.LeaderEpoch)
+	}
+
+	return nil
+}
+
+type OffsetResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Blocks         map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 2 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(OffsetResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+/*
+// [0 0 0 1 ntopics
+0 8 109 121 95 116 111 112 105 99 topic
+0 0 0 1 npartitions
+0 0 0 0 id
+0 0
+
+0 0 0 1 0 0 0 0
+0 1 1 1 0 0 0 1
+0 8 109 121 95 116 111 112
+105 99 0 0 0 1 0 0
+0 0 0 0 0 0 0 1
+0 0 0 0 0 1 1 1] <nil>
+*/
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		if err = pe.putString(topic); err != nil {
+			return err
+		}
+		if err = pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.version()); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+	return apiKeyListOffsets
+}
+
+func (r *OffsetResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *OffsetResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V2_2_0_0
+	case 4:
+		return V2_1_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *OffsetResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*OffsetResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
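The testing helper above can be exercised directly; a small sketch with hypothetical topic and offset values:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	resp := &sarama.OffsetResponse{Version: 1}
	resp.AddTopicPartition("my_topic", 0, 42) // hypothetical topic/offset

	if block := resp.GetBlock("my_topic", 0); block != nil {
		fmt.Println("offset:", block.Offset)
	}
}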
diff --git a/vendor/github.com/IBM/sarama/packet_decoder.go b/vendor/github.com/IBM/sarama/packet_decoder.go
new file mode 100644
index 0000000..241c6c6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/packet_decoder.go
@@ -0,0 +1,79 @@
+package sarama
+
+import (
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type taggedFieldDecoderFunc func(pd packetDecoder) error
+type taggedFieldDecoders map[uint64]taggedFieldDecoderFunc
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+	// Primitives
+	getInt8() (int8, error)
+	getInt16() (int16, error)
+	getInt32() (int32, error)
+	getInt64() (int64, error)
+	getVarint() (int64, error)
+	getUVarint() (uint64, error)
+	getFloat64() (float64, error)
+	getArrayLength() (int, error)
+	getBool() (bool, error)
+	getKError() (KError, error)
+	getDurationMs() (time.Duration, error)
+	getEmptyTaggedFieldArray() (int, error)
+	getTaggedFieldArray(taggedFieldDecoders) error
+
+	// Collections
+	getBytes() ([]byte, error)
+	getVarintBytes() ([]byte, error)
+	getRawBytes(length int) ([]byte, error)
+	getString() (string, error)
+	getNullableString() (*string, error)
+	getInt32Array() ([]int32, error)
+	getInt64Array() ([]int64, error)
+	getStringArray() ([]string, error)
+
+	// Subsets
+	remaining() int
+	getSubset(length int) (packetDecoder, error)
+	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+	peekInt8(offset int) (int8, error)              // similar to peek, but just one byte
+
+	// Stacks, see PushDecoder
+	push(in pushDecoder) error
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the input of this encoder (e.g. 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and check the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+	check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
+// field itself is unknown until its value has been decoded (for instance, varint-encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength()
+type dynamicPushDecoder interface {
+	pushDecoder
+	decoder
+}
diff --git a/vendor/github.com/IBM/sarama/packet_encoder.go b/vendor/github.com/IBM/sarama/packet_encoder.go
new file mode 100644
index 0000000..674a550
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/packet_encoder.go
@@ -0,0 +1,75 @@
+package sarama
+
+import (
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+	// Primitives
+	putInt8(in int8)
+	putInt16(in int16)
+	putInt32(in int32)
+	putInt64(in int64)
+	putVarint(in int64)
+	putUVarint(in uint64)
+	putFloat64(in float64)
+	putArrayLength(in int) error
+	putBool(in bool)
+	putKError(in KError)
+	putDurationMs(in time.Duration)
+
+	// Collections
+	putBytes(in []byte) error
+	putVarintBytes(in []byte) error
+	putRawBytes(in []byte) error
+	putString(in string) error
+	putNullableString(in *string) error
+	putStringArray(in []string) error
+	putInt32Array(in []int32) error
+	putInt64Array(in []int64) error
+	putNullableInt32Array(in []int32) error
+	putEmptyTaggedFieldArray()
+
+	// Provide the current offset to record the batch size metric
+	offset() int
+
+	// Stacks, see PushEncoder
+	push(in pushEncoder)
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and write the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+	// of data to the saved offset, based on the data between the saved offset and curOffset.
+	run(curOffset int, buf []byte) error
+}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
+// field itself is unknown until its value has been computed (for instance, varint-encoded length
+// fields).
+type dynamicPushEncoder interface {
+	pushEncoder
+
+	// Called during pop() to adjust the length of the field.
+	// It should return the difference in bytes between the last computed length and current length.
+	adjustLength(currOffset int) int
+}
diff --git a/vendor/github.com/IBM/sarama/partitioner.go b/vendor/github.com/IBM/sarama/partitioner.go
new file mode 100644
index 0000000..50a345a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/partitioner.go
@@ -0,0 +1,248 @@
+package sarama
+
+import (
+	"hash"
+	"hash/crc32"
+	"hash/fnv"
+	"math/rand"
+	"time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+	// Partition takes a message and partition count and chooses a partition
+	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+	// RequiresConsistency indicates to the user of the partitioner whether the
+	// mapping of key->partition is consistent or not. Specifically, if a
+	// partitioner requires consistency then it must be allowed to choose from all
+	// partitions (even ones known to be unavailable), and its choice must be
+	// respected by the caller. The obvious example is the HashPartitioner.
+	RequiresConsistency() bool
+}
+
+// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
+// in order to allow more flexibility than is originally allowed by the
+// RequiresConsistency method in the Partitioner interface. This allows
+// partitioners to require consistency sometimes, but not all times. It's useful
+// for, e.g., the HashPartitioner, which does not require consistency if the
+// message key is nil.
+type DynamicConsistencyPartitioner interface {
+	Partitioner
+
+	// MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
+	// but takes in the message being partitioned so that the partitioner can
+	// make a per-message determination.
+	MessageRequiresConsistency(message *ProducerMessage) bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// HashPartitionerOption lets you modify default values of the partitioner
+type HashPartitionerOption func(*hashPartitioner)
+
+// WithAbsFirst means that the partitioner handles absolute values
+// in the same way as the reference Java implementation
+func WithAbsFirst() HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.referenceAbs = true
+	}
+}
+
+// WithHashUnsigned means the partitioner treats the hashed value as unsigned when
+// partitioning.  This is intended to be combined with the crc32 hash algorithm to
+// be compatible with librdkafka's implementation
+func WithHashUnsigned() HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.hashUnsigned = true
+	}
+}
+
+// WithCustomHashFunction lets you specify what hash function to use for the partitioning
+func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.hasher = hasher()
+	}
+}
+
+// WithCustomFallbackPartitioner lets you specify which Partitioner should be used when the message key is nil
+func WithCustomFallbackPartitioner(randomHP Partitioner) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.random = randomHP
+	}
+}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+	return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+type randomPartitioner struct {
+	generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+	p := new(randomPartitioner)
+	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+	return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type roundRobinPartitioner struct {
+	partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+	return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if p.partition >= numPartitions {
+		p.partition = 0
+	}
+	ret := p.partition
+	p.partition++
+	return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type hashPartitioner struct {
+	random       Partitioner
+	hasher       hash.Hash32
+	referenceAbs bool
+	hashUnsigned bool
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
+// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = hasher()
+		p.referenceAbs = false
+		p.hashUnsigned = false
+		return p
+	}
+}
+
+// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
+func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = fnv.New32a()
+		p.referenceAbs = false
+		p.hashUnsigned = false
+		for _, option := range options {
+			option(p)
+		}
+		return p
+	}
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulus the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	p.referenceAbs = false
+	p.hashUnsigned = false
+	return p
+}
+
+// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
+// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
+// that but it had a mistake and now there are people depending on both behaviors. This will
+// all go away on the next major version bump.
+func NewReferenceHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	p.referenceAbs = true
+	p.hashUnsigned = false
+	return p
+}
+
+// NewConsistentCRCHashPartitioner is like NewHashPartitioner except that it uses the *unsigned* crc32 hash
+// of the encoded bytes of the message key modulus the number of partitions.  This is compatible with
+// librdkafka's `consistent_random` partitioner
+func NewConsistentCRCHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = crc32.NewIEEE()
+	p.referenceAbs = false
+	p.hashUnsigned = true
+	return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if message.Key == nil {
+		return p.random.Partition(message, numPartitions)
+	}
+	bytes, err := message.Key.Encode()
+	if err != nil {
+		return -1, err
+	}
+	p.hasher.Reset()
+	_, err = p.hasher.Write(bytes)
+	if err != nil {
+		return -1, err
+	}
+	var partition int32
+	// Turns out we were doing our absolute value in a subtly different way from the upstream
+	// implementation, but now we need to maintain backwards compat for people who started using
+	// the old version; if referenceAbs is set we are compatible with the reference Java client
+	// but not past Sarama versions
+	if p.referenceAbs {
+		partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
+	} else if p.hashUnsigned {
+		// librdkafka treats the hashed value as unsigned.  If `hashUnsigned` is set we are compatible
+		// with librdkafka's `consistent` partitioning but not past Sarama versions
+		partition = int32(p.hasher.Sum32() % uint32(numPartitions))
+	} else {
+		partition = int32(p.hasher.Sum32()) % numPartitions
+		if partition < 0 {
+			partition = -partition
+		}
+	}
+	return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
+	return message.Key != nil
+}
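Partitioners are wired in through the producer configuration; a brief sketch showing the default keyed hashing and an option-built equivalent of NewConsistentCRCHashPartitioner:

package main

import (
	"hash/crc32"

	"github.com/IBM/sarama"
)

func newProducerConfig() *sarama.Config {
	cfg := sarama.NewConfig()

	// Default keyed hashing (FNV-1a); nil keys fall back to the random partitioner.
	cfg.Producer.Partitioner = sarama.NewHashPartitioner

	// Equivalent to NewConsistentCRCHashPartitioner, assembled from options:
	cfg.Producer.Partitioner = sarama.NewCustomPartitioner(
		sarama.WithCustomHashFunction(crc32.NewIEEE),
		sarama.WithHashUnsigned(),
	)
	return cfg
}

func main() {
	_ = newProducerConfig()
}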
diff --git a/vendor/github.com/IBM/sarama/prep_encoder.go b/vendor/github.com/IBM/sarama/prep_encoder.go
new file mode 100644
index 0000000..d30f6e5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/prep_encoder.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+	stack  []pushEncoder
+	length int
+}
+
+type prepFlexibleEncoder struct {
+	*prepEncoder
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+	pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+	pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+	pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+	pe.length += 8
+}
+
+func (pe *prepEncoder) putVarint(in int64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutVarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putUVarint(in uint64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutUvarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putFloat64(in float64) {
+	pe.length += 8
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+	if in > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+	}
+	pe.length += 4
+	return nil
+}
+
+func (pe *prepEncoder) putBool(in bool) {
+	pe.length++
+}
+
+func (pe *prepEncoder) putKError(in KError) {
+	pe.length += 2
+}
+
+func (pe *prepEncoder) putDurationMs(in time.Duration) {
+	pe.length += 4
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+	pe.length += 4
+	if in == nil {
+		return nil
+	}
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		pe.putVarint(-1)
+		return nil
+	}
+	pe.putVarint(int64(len(in)))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+	if len(in) > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableString(in *string) error {
+	if in == nil {
+		pe.length += 2
+		return nil
+	}
+	return pe.putString(*in)
+}
+
+func (pe *prepEncoder) putString(in string) error {
+	pe.length += 2
+	if len(in) > math.MaxInt16 {
+		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, str := range in {
+		if err := pe.putString(str); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableInt32Array(in []int32) error {
+	if in == nil {
+		pe.length += 4
+		return nil
+	}
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 8 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putEmptyTaggedFieldArray() {
+}
+
+func (pe *prepEncoder) offset() int {
+	return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+	in.saveOffset(pe.length)
+	pe.length += in.reserveLength()
+	pe.stack = append(pe.stack, in)
+}
+
+func (pe *prepEncoder) pop() error {
+	in := pe.stack[len(pe.stack)-1]
+	pe.stack = pe.stack[:len(pe.stack)-1]
+	if dpe, ok := in.(dynamicPushEncoder); ok {
+		pe.length += dpe.adjustLength(pe.length)
+	}
+
+	return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putArrayLength(in int) error {
+	pe.putUVarint(uint64(in + 1))
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putBytes(in []byte) error {
+	pe.putUVarint(uint64(len(in) + 1))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepFlexibleEncoder) putString(in string) error {
+	if err := pe.putArrayLength(len(in)); err != nil {
+		return err
+	}
+	return pe.putRawBytes([]byte(in))
+}
+
+func (pe *prepFlexibleEncoder) putNullableString(in *string) error {
+	if in == nil {
+		pe.putUVarint(0)
+		return nil
+	} else {
+		return pe.putString(*in)
+	}
+}
+
+func (pe *prepFlexibleEncoder) putStringArray(in []string) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, str := range in {
+		if err := pe.putString(str); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putInt32Array(in []int32) error {
+	if in == nil {
+		return errors.New("expected int32 array to be non null")
+	}
+
+	pe.putUVarint(uint64(len(in)) + 1)
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putNullableInt32Array(in []int32) error {
+	if in == nil {
+		pe.putUVarint(0)
+		return nil
+	}
+
+	pe.putUVarint(uint64(len(in)) + 1)
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putEmptyTaggedFieldArray() {
+	pe.putUVarint(0)
+}
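The prep encoder implements the first half of a two-pass encoding scheme: one pass only counts bytes so the second pass can write into an exactly-sized buffer. A simplified, self-contained illustration of that pattern (the encoder types below are hypothetical stand-ins, not sarama's):

package main

import (
	"encoding/binary"
	"fmt"
)

// countingEncoder only tallies how many bytes each primitive would occupy.
type countingEncoder struct{ n int }

func (e *countingEncoder) putInt32(int32)     { e.n += 4 }
func (e *countingEncoder) putString(s string) { e.n += 2 + len(s) }

// writingEncoder actually serializes into a pre-sized buffer.
type writingEncoder struct{ buf []byte }

func (e *writingEncoder) putInt32(v int32) {
	e.buf = binary.BigEndian.AppendUint32(e.buf, uint32(v))
}

func (e *writingEncoder) putString(s string) {
	e.buf = binary.BigEndian.AppendUint16(e.buf, uint16(len(s)))
	e.buf = append(e.buf, s...)
}

func main() {
	topic, partition := "my_topic", int32(3)

	prep := &countingEncoder{}
	prep.putString(topic)
	prep.putInt32(partition)

	w := &writingEncoder{buf: make([]byte, 0, prep.n)} // size known before writing
	w.putString(topic)
	w.putInt32(partition)

	fmt.Printf("predicted %d bytes, wrote %d bytes\n", prep.n, len(w.buf))
}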
diff --git a/vendor/github.com/IBM/sarama/produce_request.go b/vendor/github.com/IBM/sarama/produce_request.go
new file mode 100644
index 0000000..de88ba4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_request.go
@@ -0,0 +1,274 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.isr` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response, the TCP ACK is all you get.
+	NoResponse RequiredAcks = 0
+	// WaitForLocal waits for only the local commit to succeed before responding.
+	WaitForLocal RequiredAcks = 1
+	// WaitForAll waits for all in-sync replicas to commit before responding.
+	// The minimum number of in-sync replicas is configured on the broker via
+	// the `min.insync.replicas` configuration key.
+	WaitForAll RequiredAcks = -1
+)
+
+type ProduceRequest struct {
+	TransactionalID *string
+	RequiredAcks    RequiredAcks
+	Timeout         int32
+	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+	records         map[string]map[int32]Records
+}
+
+func (r *ProduceRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram,
+) int64 {
+	var topicRecordCount int64
+	for _, messageBlock := range msgSet.Messages {
+		// Is this a fake "message" wrapping real messages?
+		if messageBlock.Msg.Set != nil {
+			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+		} else {
+			// A single uncompressed message
+			topicRecordCount++
+		}
+		// Better be safe than sorry when computing the compression ratio
+		if messageBlock.Msg.compressedSize != 0 {
+			compressionRatio := float64(len(messageBlock.Msg.Value)) /
+				float64(messageBlock.Msg.compressedSize)
+			// Histograms do not support decimal values, so multiply by 100 for better precision
+			intCompressionRatio := int64(100 * compressionRatio)
+			compressionRatioMetric.Update(intCompressionRatio)
+			topicCompressionRatioMetric.Update(intCompressionRatio)
+		}
+	}
+	return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram,
+) int64 {
+	if recordBatch.compressedRecords != nil {
+		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+		compressionRatioMetric.Update(compressionRatio)
+		topicCompressionRatioMetric.Update(compressionRatio)
+	}
+
+	return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.TransactionalID); err != nil {
+			return err
+		}
+	}
+	pe.putInt16(int16(r.RequiredAcks))
+	pe.putInt32(r.Timeout)
+	metricRegistry := pe.metricRegistry()
+	var batchSizeMetric metrics.Histogram
+	var compressionRatioMetric metrics.Histogram
+	if metricRegistry != nil {
+		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+	}
+	totalRecordCount := int64(0)
+
+	err := pe.putArrayLength(len(r.records))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.records {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		topicRecordCount := int64(0)
+		var topicCompressionRatioMetric metrics.Histogram
+		if metricRegistry != nil {
+			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+		}
+		for id, records := range partitions {
+			startOffset := pe.offset()
+			pe.putInt32(id)
+			pe.push(&lengthField{})
+			err = records.encode(pe)
+			if err != nil {
+				return err
+			}
+			err = pe.pop()
+			if err != nil {
+				return err
+			}
+			if metricRegistry != nil {
+				if r.Version >= 3 {
+					topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+				} else {
+					topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
+				}
+				batchSize := int64(pe.offset() - startOffset)
+				batchSizeMetric.Update(batchSize)
+				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+			}
+		}
+		if topicRecordCount > 0 {
+			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+			totalRecordCount += topicRecordCount
+		}
+	}
+	if totalRecordCount > 0 {
+		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	if version >= 3 {
+		id, err := pd.getNullableString()
+		if err != nil {
+			return err
+		}
+		r.TransactionalID = id
+	}
+	requiredAcks, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.RequiredAcks = RequiredAcks(requiredAcks)
+	if r.Timeout, err = pd.getInt32(); err != nil {
+		return err
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+
+	r.records = make(map[string]map[int32]Records)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.records[topic] = make(map[int32]Records)
+
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			size, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			recordsDecoder, err := pd.getSubset(int(size))
+			if err != nil {
+				return err
+			}
+			var records Records
+			if err := records.decode(recordsDecoder); err != nil {
+				return err
+			}
+			r.records[topic][partition] = records
+		}
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+	return apiKeyProduce
+}
+
+func (r *ProduceRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *ProduceRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_1_0_0
+	case 6:
+		return V2_0_0_0
+	case 4, 5:
+		return V1_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_10_0_0
+	case 1:
+		return V0_9_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
+	if r.records == nil {
+		r.records = make(map[string]map[int32]Records)
+	}
+
+	if r.records[topic] == nil {
+		r.records[topic] = make(map[int32]Records)
+	}
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+	r.ensureRecords(topic, partition)
+	set := r.records[topic][partition].MsgSet
+
+	if set == nil {
+		set = new(MessageSet)
+		r.records[topic][partition] = newLegacyRecords(set)
+	}
+
+	set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newLegacyRecords(set)
+}
+
+func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newDefaultRecords(batch)
+}
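ProduceRequest construction is normally hidden behind the producer API; a minimal sketch assuming a local broker at localhost:9092 and a placeholder topic:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // the producer then selects ProduceRequest v7
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Return.Successes = true // required by the SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition=%d offset=%d", partition, offset)
}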
diff --git a/vendor/github.com/IBM/sarama/produce_response.go b/vendor/github.com/IBM/sarama/produce_response.go
new file mode 100644
index 0000000..f3c5d5d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_response.go
@@ -0,0 +1,237 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+// Protocol, http://kafka.apache.org/protocol.html
+// v1
+// v2 = v3 = v4
+// v5 = v6 = v7
+// Produce Response (Version: 7) => [responses] throttle_time_ms
+//   responses => topic [partition_responses]
+//     topic => STRING
+//     partition_responses => partition error_code base_offset log_append_time log_start_offset
+//       partition => INT32
+//       error_code => INT16
+//       base_offset => INT64
+//       log_append_time => INT64
+//       log_start_offset => INT64
+//   throttle_time_ms => INT32
+
+// partition_responses in protocol
+type ProduceResponseBlock struct {
+	Err         KError    // v0, error_code
+	Offset      int64     // v0, base_offset
+	Timestamp   time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime`
+	StartOffset int64     // v5, log_start_offset
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 2 {
+		if millis, err := pd.getInt64(); err != nil {
+			return err
+		} else if millis != -1 {
+			b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+		}
+	}
+
+	if version >= 5 {
+		b.StartOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putKError(b.Err)
+	pe.putInt64(b.Offset)
+
+	if version >= 2 {
+		timestamp := int64(-1)
+		if !b.Timestamp.Before(time.Unix(0, 0)) {
+			timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
+		} else if !b.Timestamp.IsZero() {
+			return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
+		}
+		pe.putInt64(timestamp)
+	}
+
+	if version >= 5 {
+		pe.putInt64(b.StartOffset)
+	}
+
+	return nil
+}
+
+type ProduceResponse struct {
+	Blocks       map[string]map[int32]*ProduceResponseBlock // v0, responses
+	Version      int16
+	ThrottleTime time.Duration // v1, throttle_time_ms
+}
+
+func (r *ProduceResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(ProduceResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+	err := pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for id, prb := range partitions {
+			pe.putInt32(id)
+			err = prb.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putDurationMs(r.ThrottleTime)
+	}
+	return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+	return apiKeyProduce
+}
+
+func (r *ProduceResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *ProduceResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_1_0_0
+	case 6:
+		return V2_0_0_0
+	case 4, 5:
+		return V1_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_10_0_0
+	case 1:
+		return V0_9_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+func (r *ProduceResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*ProduceResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	block := &ProduceResponseBlock{
+		Err: err,
+	}
+	if r.Version >= 2 {
+		block.Timestamp = time.Now()
+	}
+	byTopic[partition] = block
+}
diff --git a/vendor/github.com/IBM/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go
new file mode 100644
index 0000000..c91403d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_set.go
@@ -0,0 +1,289 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"time"
+)
+
+type partitionSet struct {
+	msgs          []*ProducerMessage
+	recordsToSend Records
+	bufferBytes   int
+}
+
+type produceSet struct {
+	parent        *asyncProducer
+	msgs          map[string]map[int32]*partitionSet
+	producerID    int64
+	producerEpoch int16
+
+	bufferBytes int
+	bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+	pid, epoch := parent.txnmgr.getProducerID()
+	return &produceSet{
+		msgs:          make(map[string]map[int32]*partitionSet),
+		parent:        parent,
+		producerID:    pid,
+		producerEpoch: epoch,
+	}
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+	var err error
+	var key, val []byte
+
+	if msg.Key != nil {
+		if key, err = msg.Key.Encode(); err != nil {
+			return err
+		}
+	}
+
+	if msg.Value != nil {
+		if val, err = msg.Value.Encode(); err != nil {
+			return err
+		}
+	}
+
+	timestamp := msg.Timestamp
+	if timestamp.IsZero() {
+		timestamp = time.Now()
+	}
+	timestamp = timestamp.Truncate(time.Millisecond)
+
+	partitions := ps.msgs[msg.Topic]
+	if partitions == nil {
+		partitions = make(map[int32]*partitionSet)
+		ps.msgs[msg.Topic] = partitions
+	}
+
+	var size int
+
+	set := partitions[msg.Partition]
+	if set == nil {
+		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+			batch := &RecordBatch{
+				FirstTimestamp:   timestamp,
+				Version:          2,
+				Codec:            ps.parent.conf.Producer.Compression,
+				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+				ProducerID:       ps.producerID,
+				ProducerEpoch:    ps.producerEpoch,
+			}
+			if ps.parent.conf.Producer.Idempotent {
+				batch.FirstSequence = msg.sequenceNumber
+			}
+			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
+			size = recordBatchOverhead
+		} else {
+			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
+		}
+		partitions[msg.Partition] = set
+	}
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
+			return errors.New("assertion failed: message out of sequence added to a batch")
+		}
+	}
+
+	// Past this point we can't return an error, because we've already added the message to the set.
+	set.msgs = append(set.msgs, msg)
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		// We are being conservative here to avoid having to prep encode the record
+		size += maximumRecordOverhead
+		rec := &Record{
+			Key:            key,
+			Value:          val,
+			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
+		}
+		size += len(key) + len(val)
+		if len(msg.Headers) > 0 {
+			rec.Headers = make([]*RecordHeader, len(msg.Headers))
+			for i := range msg.Headers {
+				rec.Headers[i] = &msg.Headers[i]
+				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
+			}
+		}
+		set.recordsToSend.RecordBatch.addRecord(rec)
+	} else {
+		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+			msgToSend.Timestamp = timestamp
+			msgToSend.Version = 1
+		}
+		set.recordsToSend.MsgSet.addMessage(msgToSend)
+		size = producerMessageOverhead + len(key) + len(val)
+	}
+
+	set.bufferBytes += size
+	ps.bufferBytes += size
+	ps.bufferCount++
+
+	return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+	req := &ProduceRequest{
+		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+		req.Version = 2
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 3
+		if ps.parent.IsTransactional() {
+			req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID
+		}
+	}
+	if ps.parent.conf.Version.IsAtLeast(V1_0_0_0) {
+		req.Version = 5
+	}
+	if ps.parent.conf.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 6
+	}
+	if ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
+		req.Version = 7
+	}
+
+	for topic, partitionSets := range ps.msgs {
+		for partition, set := range partitionSets {
+			if req.Version >= 3 {
+				// If the API version we're hitting is 3 or greater, we need to calculate
+				// offsets for each record in the batch relative to FirstOffset.
+				// Additionally, we must set LastOffsetDelta to the value of the last offset
+				// in the batch. Since the OffsetDelta of the first record is 0, we know that the
+				// final record of any batch will have an offset of (# of records in batch) - 1.
+				// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
+				//  under the RecordBatch section for details.)
+				rb := set.recordsToSend.RecordBatch
+				if len(rb.Records) > 0 {
+					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
+					var maxTimestampDelta time.Duration
+					for i, record := range rb.Records {
+						record.OffsetDelta = int64(i)
+						maxTimestampDelta = max(maxTimestampDelta, record.TimestampDelta)
+					}
+					// Also set the MaxTimestamp similar to other clients.
+					rb.MaxTimestamp = rb.FirstTimestamp.Add(maxTimestampDelta)
+				}
+
+				// Set the batch as transactional when a transactionalID is set
+				rb.IsTransactional = ps.parent.IsTransactional()
+
+				req.AddBatch(topic, partition, rb)
+				continue
+			}
+			if ps.parent.conf.Producer.Compression == CompressionNone {
+				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
+			} else {
+				// When compression is enabled, the entire set for each partition is compressed
+				// and sent as the payload of a single fake "message" with the appropriate codec
+				// set and no key. When the server sees a message with a compression codec, it
+				// decompresses the payload and treats the result as its message set.
+
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					// If our version is 0.10 or later, assign relative offsets
+					// to the inner messages. This lets the broker avoid
+					// recompressing the message set.
+					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
+					// for details on relative offsets.)
+					for i, msg := range set.recordsToSend.MsgSet.Messages {
+						msg.Offset = int64(i)
+					}
+				}
+				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.metricsRegistry)
+				if err != nil {
+					Logger.Println(err) // if this happens, it's basically our fault.
+					panic(err)
+				}
+				compMsg := &Message{
+					Codec:            ps.parent.conf.Producer.Compression,
+					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+					Key:              nil,
+					Value:            payload,
+					Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
+				}
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					compMsg.Version = 1
+					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
+				}
+				req.AddMessage(topic, partition, compMsg)
+			}
+		}
+	}
+
+	return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
+	for topic, partitionSet := range ps.msgs {
+		for partition, set := range partitionSet {
+			cb(topic, partition, set)
+		}
+	}
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+	if ps.msgs[topic] == nil {
+		return nil
+	}
+	set := ps.msgs[topic][partition]
+	if set == nil {
+		return nil
+	}
+	ps.bufferBytes -= set.bufferBytes
+	ps.bufferCount -= len(set.msgs)
+	delete(ps.msgs[topic], partition)
+	return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+	version := 1
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		version = 2
+	}
+
+	switch {
+	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+	case ps.bufferBytes+msg.ByteSize(version) >= int(MaxRequestSize-(10*1024)):
+		return true
+	// Would we overflow the size-limit of a message-batch for this partition?
+	case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.ByteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
+		return true
+	// Would we overflow simply in number of messages?
+	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) readyToFlush() bool {
+	switch {
+	// If we don't have any messages, nothing else matters
+	case ps.empty():
+		return false
+	// If all three config values are 0, we always flush as-fast-as-possible
+	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+		return true
+	// If we've passed the message trigger-point
+	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+		return true
+	// If we've passed the byte trigger-point
+	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) empty() bool {
+	return ps.bufferCount == 0
+}
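
For reference, readyToFlush and wouldOverflow above key directly off the Producer.Flush and size settings in the configuration. A minimal, illustrative within-package sketch (threshold values are placeholders; the Frequency timer itself is driven by the async producer loop rather than by readyToFlush):

	conf := NewConfig()
	conf.Producer.Flush.Frequency = 500 * time.Millisecond // time-based trigger, handled by the producer's timer
	conf.Producer.Flush.Bytes = 64 * 1024                   // byte trigger checked in readyToFlush
	conf.Producer.Flush.Messages = 1000                     // message-count trigger checked in readyToFlush
	conf.Producer.Flush.MaxMessages = 2000                  // hard cap enforced by wouldOverflow
	// Leaving Frequency, Bytes and Messages all at zero makes readyToFlush
	// report true whenever the set is non-empty (flush as fast as possible).
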
diff --git a/vendor/github.com/IBM/sarama/quota_types.go b/vendor/github.com/IBM/sarama/quota_types.go
new file mode 100644
index 0000000..4f33af0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/quota_types.go
@@ -0,0 +1,21 @@
+package sarama
+
+type (
+	QuotaEntityType string
+
+	QuotaMatchType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaEntity.java
+const (
+	QuotaEntityUser     QuotaEntityType = "user"
+	QuotaEntityClientID QuotaEntityType = "client-id"
+	QuotaEntityIP       QuotaEntityType = "ip"
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java
+const (
+	QuotaMatchExact QuotaMatchType = iota
+	QuotaMatchDefault
+	QuotaMatchAny
+)
diff --git a/vendor/github.com/IBM/sarama/real_decoder.go b/vendor/github.com/IBM/sarama/real_decoder.go
new file mode 100644
index 0000000..ddeb543
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/real_decoder.go
@@ -0,0 +1,549 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"math"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+var (
+	errInvalidArrayLength     = PacketDecodingError{"invalid array length"}
+	errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+	errInvalidStringLength    = PacketDecodingError{"invalid string length"}
+	errVarintOverflow         = PacketDecodingError{"varint overflow"}
+	errUVarintOverflow        = PacketDecodingError{"uvarint overflow"}
+	errInvalidBool            = PacketDecodingError{"invalid bool"}
+)
+
+type realDecoder struct {
+	raw      []byte
+	off      int
+	stack    []pushDecoder
+	registry metrics.Registry
+}
+
+type realFlexibleDecoder struct {
+	*realDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+	if rd.remaining() < 1 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int8(rd.raw[rd.off])
+	rd.off++
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+	if rd.remaining() < 2 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+	rd.off += 2
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+	if rd.remaining() < 8 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+	rd.off += 8
+	return tmp, nil
+}
+
+func (rd *realDecoder) getVarint() (int64, error) {
+	tmp, n := binary.Varint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	if n < 0 {
+		rd.off -= n
+		return -1, errVarintOverflow
+	}
+	rd.off += n
+	return tmp, nil
+}
+
+func (rd *realDecoder) getUVarint() (uint64, error) {
+	tmp, n := binary.Uvarint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	if n < 0 {
+		rd.off -= n
+		return 0, errUVarintOverflow
+	}
+
+	rd.off += n
+	return tmp, nil
+}
+
+func (rd *realDecoder) getFloat64() (float64, error) {
+	if rd.remaining() < 8 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := math.Float64frombits(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+	rd.off += 8
+	return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	// cast to int32 first to get the correct signedness for the length,
+	// then cast to int for ease of interop
+	tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
+	rd.off += 4
+	if tmp > rd.remaining() {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	} else if tmp > int(MaxResponseSize) {
+		return -1, errInvalidArrayLength
+	}
+	return tmp, nil
+}
+
+func (rd *realDecoder) getBool() (bool, error) {
+	b, err := rd.getInt8()
+	if err != nil || b == 0 {
+		return false, err
+	}
+	if b != 1 {
+		return false, errInvalidBool
+	}
+	return true, nil
+}
+
+func (rd *realDecoder) getKError() (KError, error) {
+	i, err := rd.getInt16()
+	return KError(i), err
+}
+
+func (rd *realDecoder) getDurationMs() (time.Duration, error) {
+	t, err := rd.getInt32()
+	if err != nil {
+		return time.Duration(0), err
+	}
+	return time.Duration(t) * time.Millisecond, nil
+}
+
+func (rd *realDecoder) getTaggedFieldArray(decoders taggedFieldDecoders) error {
+	return PacketDecodingError{"tagged fields used in non-flexible context"}
+}
+
+func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) {
+	return 0, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+	tmp, err := rd.getInt32()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getVarintBytes() ([]byte, error) {
+	tmp, err := rd.getVarint()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getStringLength() (int, error) {
+	length, err := rd.getInt16()
+	if err != nil {
+		return 0, err
+	}
+
+	n := int(length)
+
+	switch {
+	case n < -1:
+		return 0, errInvalidStringLength
+	case n > rd.remaining():
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	return n, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return "", err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return tmpStr, nil
+}
+
+func (rd *realDecoder) getNullableString() (*string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return nil, err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return &tmpStr, err
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+	n, err := rd.getArrayLength()
+	if err != nil {
+		return nil, err
+	}
+	if n <= 0 {
+		return nil, nil
+	}
+
+	if rd.remaining() < 4*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	ret := make([]int32, n)
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+	n, err := rd.getArrayLength()
+	if err != nil {
+		return nil, err
+	}
+	if n <= 0 {
+		return nil, nil
+	}
+
+	if rd.remaining() < 8*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	ret := make([]int64, n)
+	for i := range ret {
+		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+		rd.off += 8
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+	n, err := rd.getArrayLength()
+	if err != nil {
+		return nil, err
+	}
+	if n <= 0 {
+		return nil, nil
+	}
+
+	ret := make([]string, n)
+	for i := range ret {
+		str, err := rd.getString()
+		if err != nil {
+			return nil, err
+		}
+
+		ret[i] = str
+	}
+	return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+	return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+	buf, err := rd.getRawBytes(length)
+	if err != nil {
+		return nil, err
+	}
+	return &realDecoder{raw: buf}, nil
+}
+
+func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
+	if length < 0 {
+		return nil, errInvalidByteSliceLength
+	} else if length > rd.remaining() {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	start := rd.off
+	rd.off += length
+	return rd.raw[start:rd.off], nil
+}
+
+func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
+	if rd.remaining() < offset+length {
+		return nil, ErrInsufficientData
+	}
+	off := rd.off + offset
+	return &realDecoder{raw: rd.raw[off : off+length]}, nil
+}
+
+func (rd *realDecoder) peekInt8(offset int) (int8, error) {
+	const byteLen = 1
+	if rd.remaining() < offset+byteLen {
+		return -1, ErrInsufficientData
+	}
+	return int8(rd.raw[rd.off+offset]), nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+	in.saveOffset(rd.off)
+
+	var reserve int
+	if dpd, ok := in.(dynamicPushDecoder); ok {
+		if err := dpd.decode(rd); err != nil {
+			return err
+		}
+	} else {
+		reserve = in.reserveLength()
+		if rd.remaining() < reserve {
+			rd.off = len(rd.raw)
+			return ErrInsufficientData
+		}
+	}
+
+	rd.stack = append(rd.stack, in)
+
+	rd.off += reserve
+
+	return nil
+}
+
+func (rd *realDecoder) pop() error {
+	// this is go's ugly pop pattern (the inverse of append)
+	in := rd.stack[len(rd.stack)-1]
+	rd.stack = rd.stack[:len(rd.stack)-1]
+
+	return in.check(rd.off, rd.raw)
+}
+
+func (rd *realDecoder) metricRegistry() metrics.Registry {
+	return rd.registry
+}
+
+func (rd *realFlexibleDecoder) getArrayLength() (int, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	if n == 0 {
+		return 0, nil
+	}
+
+	return int(n) - 1, nil
+}
+
+func (rd *realFlexibleDecoder) getEmptyTaggedFieldArray() (int, error) {
+	tagCount, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	// skip over any tagged fields without deserializing them
+	// as we don't currently support doing anything with them
+	for i := uint64(0); i < tagCount; i++ {
+		// fetch and ignore tag identifier
+		_, err := rd.getUVarint()
+		if err != nil {
+			return 0, err
+		}
+		length, err := rd.getUVarint()
+		if err != nil {
+			return 0, err
+		}
+		if _, err := rd.getRawBytes(int(length)); err != nil {
+			return 0, err
+		}
+	}
+
+	return 0, nil
+}
+
+func (rd *realFlexibleDecoder) getTaggedFieldArray(decoders taggedFieldDecoders) error {
+	// if we have no decoders just skip over the tagged fields
+	if decoders == nil {
+		_, err := rd.getEmptyTaggedFieldArray()
+		return err
+	}
+
+	tagCount, err := rd.getUVarint()
+	if err != nil {
+		return err
+	}
+
+	for i := uint64(0); i < tagCount; i++ {
+		// fetch and ignore tag identifier
+		id, err := rd.getUVarint()
+		if err != nil {
+			return err
+		}
+		length, err := rd.getUVarint()
+		if err != nil {
+			return err
+		}
+		bytes, err := rd.getRawBytes(int(length))
+		if err != nil {
+			return err
+		}
+		decoder, ok := decoders[id]
+		if !ok {
+			continue
+		}
+		if err := decoder(&realFlexibleDecoder{&realDecoder{raw: bytes}}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (rd *realFlexibleDecoder) getBytes() ([]byte, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	length := int(n - 1)
+	return rd.getRawBytes(length)
+}
+
+func (rd *realFlexibleDecoder) getStringLength() (int, error) {
+	length, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	n := int(length - 1)
+
+	switch {
+	case n < -1:
+		return 0, errInvalidStringLength
+	case n > rd.remaining():
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	return n, nil
+}
+
+func (rd *realFlexibleDecoder) getString() (string, error) {
+	length, err := rd.getStringLength()
+	if err != nil || length == -1 {
+		return "", err
+	}
+
+	if length < 0 {
+		return "", errInvalidStringLength
+	}
+	tmpStr := string(rd.raw[rd.off : rd.off+length])
+	rd.off += length
+	return tmpStr, nil
+}
+
+func (rd *realFlexibleDecoder) getNullableString() (*string, error) {
+	length, err := rd.getStringLength()
+	if err != nil {
+		return nil, err
+	}
+
+	if length < 0 {
+		return nil, err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+length])
+	rd.off += length
+	return &tmpStr, err
+}
+
+func (rd *realFlexibleDecoder) getInt32Array() ([]int32, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	arrayLength := int(n) - 1
+
+	ret := make([]int32, arrayLength)
+
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
+func (rd *realFlexibleDecoder) getStringArray() ([]string, error) {
+	n, err := rd.getArrayLength()
+	if err != nil {
+		return nil, err
+	}
+	if n <= 0 {
+		return nil, nil
+	}
+
+	ret := make([]string, n)
+	for i := range ret {
+		str, err := rd.getString()
+		if err != nil {
+			return nil, err
+		}
+
+		ret[i] = str
+	}
+	return ret, nil
+}
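
The realFlexibleDecoder implements the compact (KIP-482 "flexible") encodings: lengths are unsigned varints holding N+1, so 0 means null and 1 means empty, and unknown tagged fields are skipped outright. A small within-package illustration of that convention, with a hand-assembled buffer:

	// uvarint 0x04 => string length 3, followed by the bytes "foo",
	// then uvarint 0x00 => an empty tagged-field array.
	rd := &realFlexibleDecoder{&realDecoder{raw: []byte{0x04, 'f', 'o', 'o', 0x00}}}
	if s, err := rd.getString(); err == nil {
		Logger.Println(s) // prints "foo"
	}
	_, _ = rd.getEmptyTaggedFieldArray() // consumes the trailing 0x00
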
diff --git a/vendor/github.com/IBM/sarama/real_encoder.go b/vendor/github.com/IBM/sarama/real_encoder.go
new file mode 100644
index 0000000..b9bd178
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/real_encoder.go
@@ -0,0 +1,268 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"math"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+	raw      []byte
+	off      int
+	stack    []pushEncoder
+	registry metrics.Registry
+}
+
+type realFlexibleEncoder struct {
+	*realEncoder
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+	re.raw[re.off] = byte(in)
+	re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+	re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+	re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+	re.off += 8
+}
+
+func (re *realEncoder) putVarint(in int64) {
+	re.off += binary.PutVarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putUVarint(in uint64) {
+	re.off += binary.PutUvarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putFloat64(in float64) {
+	binary.BigEndian.PutUint64(re.raw[re.off:], math.Float64bits(in))
+	re.off += 8
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+	re.putInt32(int32(in))
+	return nil
+}
+
+func (re *realEncoder) putBool(in bool) {
+	if in {
+		re.putInt8(1)
+		return
+	}
+	re.putInt8(0)
+}
+
+func (re *realEncoder) putKError(in KError) {
+	re.putInt16(int16(in))
+}
+
+func (re *realEncoder) putDurationMs(in time.Duration) {
+	re.putInt32(int32(in / time.Millisecond))
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+	if in == nil {
+		re.putInt32(-1)
+		return nil
+	}
+	re.putInt32(int32(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		re.putVarint(-1)
+		return nil
+	}
+	re.putVarint(int64(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putString(in string) error {
+	re.putInt16(int16(len(in)))
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putNullableString(in *string) error {
+	if in == nil {
+		re.putInt16(-1)
+		return nil
+	}
+	return re.putString(*in)
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, val := range in {
+		if err := re.putString(val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putNullableInt32Array(in []int32) error {
+	if in == nil {
+		re.putInt32(-1)
+		return nil
+	}
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt64(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putEmptyTaggedFieldArray() {
+}
+
+func (re *realEncoder) offset() int {
+	return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+	in.saveOffset(re.off)
+	re.off += in.reserveLength()
+	re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+	// this is go's ugly pop pattern (the inverse of append)
+	in := re.stack[len(re.stack)-1]
+	re.stack = re.stack[:len(re.stack)-1]
+
+	return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+	return re.registry
+}
+
+func (re *realFlexibleEncoder) putArrayLength(in int) error {
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(in + 1))
+	return nil
+}
+
+func (re *realFlexibleEncoder) putBytes(in []byte) error {
+	re.putUVarint(uint64(len(in) + 1))
+	return re.putRawBytes(in)
+}
+
+func (re *realFlexibleEncoder) putString(in string) error {
+	if err := re.putArrayLength(len(in)); err != nil {
+		return err
+	}
+	return re.putRawBytes([]byte(in))
+}
+
+func (re *realFlexibleEncoder) putNullableString(in *string) error {
+	if in == nil {
+		re.putInt8(0)
+		return nil
+	}
+	return re.putString(*in)
+}
+
+func (re *realFlexibleEncoder) putStringArray(in []string) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, val := range in {
+		if err := re.putString(val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (re *realFlexibleEncoder) putInt32Array(in []int32) error {
+	if in == nil {
+		return errors.New("expected int32 array to be non null")
+	}
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(len(in)) + 1)
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realFlexibleEncoder) putNullableInt32Array(in []int32) error {
+	if in == nil {
+		re.putUVarint(0)
+		return nil
+	}
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(len(in)) + 1)
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realFlexibleEncoder) putEmptyTaggedFieldArray() {
+	re.putUVarint(0)
+}
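
On the encode side the same N+1 convention is what lets the protocol distinguish a null value from an empty one; realFlexibleEncoder writes a single zero byte for null and length+1 otherwise. A within-package sketch (the buffer is assumed to have been sized by a prior length-calculation pass):

	re := &realFlexibleEncoder{&realEncoder{raw: make([]byte, 8)}}
	_ = re.putNullableString(nil) // writes 0x00: null
	empty := ""
	_ = re.putNullableString(&empty) // writes 0x01: present but empty (length 0 + 1)
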
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/IBM/sarama/record.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/record.go
rename to vendor/github.com/IBM/sarama/record.go
diff --git a/vendor/github.com/IBM/sarama/record_batch.go b/vendor/github.com/IBM/sarama/record_batch.go
new file mode 100644
index 0000000..c422c5c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/record_batch.go
@@ -0,0 +1,226 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+	for _, r := range e {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+	records := make([]Record, len(e))
+	for i := range e {
+		if err := records[i].decode(pd); err != nil {
+			return err
+		}
+		e[i] = &records[i]
+	}
+	return nil
+}
+
+type RecordBatch struct {
+	FirstOffset           int64
+	PartitionLeaderEpoch  int32
+	Version               int8
+	Codec                 CompressionCodec
+	CompressionLevel      int
+	Control               bool
+	LogAppendTime         bool
+	LastOffsetDelta       int32
+	FirstTimestamp        time.Time
+	MaxTimestamp          time.Time
+	ProducerID            int64
+	ProducerEpoch         int16
+	FirstSequence         int32
+	Records               []*Record
+	PartialTrailingRecord bool
+	IsTransactional       bool
+
+	compressedRecords []byte
+	recordsLen        int // uncompressed records size
+}
+
+func (b *RecordBatch) LastOffset() int64 {
+	return b.FirstOffset + int64(b.LastOffsetDelta)
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+	if b.Version != 2 {
+		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+	}
+	pe.putInt64(b.FirstOffset)
+	pe.push(&lengthField{})
+	pe.putInt32(b.PartitionLeaderEpoch)
+	pe.putInt8(b.Version)
+	pe.push(newCRC32Field(crcCastagnoli))
+	pe.putInt16(b.computeAttributes())
+	pe.putInt32(b.LastOffsetDelta)
+
+	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	pe.putInt64(b.ProducerID)
+	pe.putInt16(b.ProducerEpoch)
+	pe.putInt32(b.FirstSequence)
+
+	if err := pe.putArrayLength(len(b.Records)); err != nil {
+		return err
+	}
+
+	if b.compressedRecords == nil {
+		if err := b.encodeRecords(pe); err != nil {
+			return err
+		}
+	}
+	if err := pe.putRawBytes(b.compressedRecords); err != nil {
+		return err
+	}
+
+	if err := pe.pop(); err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+	if b.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	batchLen, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if b.Version, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	crc32Decoder := acquireCrc32Field(crcCastagnoli)
+	defer releaseCrc32Field(crc32Decoder)
+
+	if err = pd.push(crc32Decoder); err != nil {
+		return err
+	}
+
+	attributes, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+	b.Control = attributes&controlMask == controlMask
+	b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
+	b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
+
+	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if b.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.FirstSequence, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	numRecs, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if numRecs >= 0 {
+		b.Records = make([]*Record, numRecs)
+	}
+
+	bufSize := int(batchLen) - recordBatchOverhead
+	recBuffer, err := pd.getRawBytes(bufSize)
+	if err != nil {
+		if errors.Is(err, ErrInsufficientData) {
+			b.PartialTrailingRecord = true
+			b.Records = nil
+			return nil
+		}
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	recBuffer, err = decompress(b.Codec, recBuffer)
+	if err != nil {
+		return err
+	}
+
+	b.recordsLen = len(recBuffer)
+	err = decode(recBuffer, recordsArray(b.Records), nil)
+	if errors.Is(err, ErrInsufficientData) {
+		b.PartialTrailingRecord = true
+		b.Records = nil
+		return nil
+	}
+	return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+	var raw []byte
+	var err error
+	if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
+		return err
+	}
+	b.recordsLen = len(raw)
+
+	b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
+	return err
+}
+
+func (b *RecordBatch) computeAttributes() int16 {
+	attr := int16(b.Codec) & int16(compressionCodecMask)
+	if b.Control {
+		attr |= controlMask
+	}
+	if b.LogAppendTime {
+		attr |= timestampTypeMask
+	}
+	if b.IsTransactional {
+		attr |= isTransactionalMask
+	}
+	return attr
+}
+
+func (b *RecordBatch) addRecord(r *Record) {
+	b.Records = append(b.Records, r)
+}
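
computeAttributes and the attribute parsing in decode mirror each other around the v2 attributes bitfield (bits 0-2 codec, bit 3 log-append time, bit 4 transactional, bit 5 control; the mask constants are defined elsewhere in the package). Assuming those standard mask values, a gzip-compressed transactional batch would encode its attributes as:

	b := &RecordBatch{Version: 2, Codec: CompressionGZIP, IsTransactional: true}
	attr := b.computeAttributes() // 0b010001 == 17: codec bits 001, transactional bit set
	_ = attr
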
diff --git a/vendor/github.com/IBM/sarama/records.go b/vendor/github.com/IBM/sarama/records.go
new file mode 100644
index 0000000..ec03d71
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/records.go
@@ -0,0 +1,219 @@
+package sarama
+
+import "fmt"
+
+const (
+	unknownRecords = iota
+	legacyRecords
+	defaultRecords
+
+	magicOffset = 16
+)
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+	recordsType int
+	MsgSet      *MessageSet
+	RecordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+	return Records{recordsType: legacyRecords, MsgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+	return Records{recordsType: defaultRecords, RecordBatch: batch}
+}
+
+// setTypeFromFields sets the type of Records depending on which of MsgSet or RecordBatch is not nil.
+// The first return value indicates whether both fields are nil (and the type is not set).
+// If both fields are not nil, it returns an error.
+func (r *Records) setTypeFromFields() (bool, error) {
+	if r.MsgSet == nil && r.RecordBatch == nil {
+		return true, nil
+	}
+	if r.MsgSet != nil && r.RecordBatch != nil {
+		return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
+	}
+	r.recordsType = defaultRecords
+	if r.MsgSet != nil {
+		r.recordsType = legacyRecords
+	}
+	return false, nil
+}
+
+func (r *Records) encode(pe packetEncoder) error {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return nil
+		}
+		return r.MsgSet.encode(pe)
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return nil
+		}
+		return r.RecordBatch.encode(pe)
+	}
+
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) setTypeFromMagic(pd packetDecoder) error {
+	magic, err := magicValue(pd)
+	if err != nil {
+		return err
+	}
+
+	r.recordsType = defaultRecords
+	if magic < 2 {
+		r.recordsType = legacyRecords
+	}
+
+	return nil
+}
+
+func (r *Records) decode(pd packetDecoder) error {
+	if r.recordsType == unknownRecords {
+		if err := r.setTypeFromMagic(pd); err != nil {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		r.MsgSet = &MessageSet{}
+		return r.MsgSet.decode(pd)
+	case defaultRecords:
+		r.RecordBatch = &RecordBatch{}
+		return r.RecordBatch.decode(pd)
+	}
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) numRecords() (int, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return 0, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return 0, nil
+		}
+		return len(r.MsgSet.Messages), nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return 0, nil
+		}
+		return len(r.RecordBatch.Records), nil
+	}
+	return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isPartial() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return false, nil
+		}
+		return r.MsgSet.PartialTrailingMessage, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return false, nil
+		}
+		return r.RecordBatch.PartialTrailingRecord, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isControl() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		return false, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return false, nil
+		}
+		return r.RecordBatch.Control, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isOverflow() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return false, nil
+		}
+		return r.MsgSet.OverflowMessage, nil
+	case defaultRecords:
+		return false, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) nextOffset() (*int64, error) {
+	switch r.recordsType {
+	case unknownRecords:
+		return nil, nil
+	case legacyRecords:
+		return nil, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return nil, nil
+		}
+		nextOffset := r.RecordBatch.LastOffset() + 1
+		return &nextOffset, nil
+	}
+	return nil, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func magicValue(pd packetDecoder) (int8, error) {
+	return pd.peekInt8(magicOffset)
+}
+
+func (r *Records) getControlRecord() (ControlRecord, error) {
+	if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 {
+		return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
+	}
+
+	firstRecord := r.RecordBatch.Records[0]
+	controlRecord := ControlRecord{}
+	err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
+	if err != nil {
+		return ControlRecord{}, err
+	}
+
+	return controlRecord, nil
+}
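
magicOffset works for both wire formats because the magic byte sits at byte 16 in each: the legacy message set has an 8-byte offset, 4-byte size and 4-byte CRC before it, while the v2 record batch has an 8-byte base offset, 4-byte batch length and 4-byte partition leader epoch. A hypothetical within-package helper showing the same check that setTypeFromMagic performs:

	// looksLikeRecordBatch is illustrative only; it mirrors setTypeFromMagic.
	func looksLikeRecordBatch(pd packetDecoder) (bool, error) {
		magic, err := pd.peekInt8(magicOffset) // byte 16 in both wire formats
		if err != nil {
			return false, err
		}
		return magic >= 2, nil
	}
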
diff --git a/vendor/github.com/IBM/sarama/request.go b/vendor/github.com/IBM/sarama/request.go
new file mode 100644
index 0000000..83a1c46
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/request.go
@@ -0,0 +1,239 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+type protocolBody interface {
+	encoder
+	versionedDecoder
+	key() int16
+	version() int16
+	setVersion(int16)
+	headerVersion() int16
+	isValidVersion() bool
+	requiredVersion() KafkaVersion
+}
+
+type request struct {
+	correlationID int32
+	clientID      string
+	body          protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) error {
+	pe.push(&lengthField{})
+	pe.putInt16(r.body.key())
+	pe.putInt16(r.body.version())
+	pe.putInt32(r.correlationID)
+
+	if r.body.headerVersion() >= 1 {
+		err := pe.putString(r.clientID)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.body.headerVersion() >= 2 {
+		// we don't use tag headers at the moment so we just put an array length of 0
+		pe.putUVarint(0)
+	}
+	pe = prepareFlexibleEncoder(pe, r.body)
+
+	err := r.body.encode(pe)
+	if err != nil {
+		return err
+	}
+
+	return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+	key, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	version, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.correlationID, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	r.clientID, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	r.body = allocateBody(key, version)
+	if r.body == nil {
+		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+	}
+
+	if r.body.headerVersion() >= 2 {
+		// tagged field
+		_, err = pd.getUVarint()
+		if err != nil {
+			return err
+		}
+	}
+
+	if decoder, ok := pd.(*realDecoder); ok {
+		pd = prepareFlexibleDecoder(decoder, r.body, version)
+	}
+	return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (*request, int, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
+	if n, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, n, err
+	}
+
+	bytesRead += len(lengthBytes)
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+	if length <= 4 || length > MaxRequestSize {
+		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if n, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, bytesRead + n, err
+	}
+
+	bytesRead += len(encodedReq)
+
+	req := &request{}
+	if err := decode(encodedReq, req, nil); err != nil {
+		return nil, bytesRead, err
+	}
+
+	return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+	switch key {
+	case apiKeyProduce:
+		return &ProduceRequest{Version: version}
+	case apiKeyFetch:
+		return &FetchRequest{Version: version}
+	case apiKeyListOffsets:
+		return &OffsetRequest{Version: version}
+	case apiKeyMetadata:
+		return &MetadataRequest{Version: version}
+	// 4: LeaderAndIsrRequest
+	// 5: StopReplicaRequest
+	// 6: UpdateMetadataRequest
+	// 7: ControlledShutdownRequest
+	case apiKeyOffsetCommit:
+		return &OffsetCommitRequest{Version: version}
+	case apiKeyOffsetFetch:
+		return &OffsetFetchRequest{Version: version}
+	case apiKeyFindCoordinator:
+		return &FindCoordinatorRequest{Version: version}
+	case apiKeyJoinGroup:
+		return &JoinGroupRequest{Version: version}
+	case apiKeyHeartbeat:
+		return &HeartbeatRequest{Version: version}
+	case apiKeyLeaveGroup:
+		return &LeaveGroupRequest{Version: version}
+	case apiKeySyncGroup:
+		return &SyncGroupRequest{Version: version}
+	case apiKeyDescribeGroups:
+		return &DescribeGroupsRequest{Version: version}
+	case apiKeyListGroups:
+		return &ListGroupsRequest{Version: version}
+	case apiKeySaslHandshake:
+		return &SaslHandshakeRequest{Version: version}
+	case apiKeyApiVersions:
+		return &ApiVersionsRequest{Version: version}
+	case apiKeyCreateTopics:
+		return &CreateTopicsRequest{Version: version}
+	case apiKeyDeleteTopics:
+		return &DeleteTopicsRequest{Version: version}
+	case apiKeyDeleteRecords:
+		return &DeleteRecordsRequest{Version: version}
+	case apiKeyInitProducerId:
+		return &InitProducerIDRequest{Version: version}
+	// 23: OffsetForLeaderEpochRequest
+	case apiKeyAddPartitionsToTxn:
+		return &AddPartitionsToTxnRequest{Version: version}
+	case apiKeyAddOffsetsToTxn:
+		return &AddOffsetsToTxnRequest{Version: version}
+	case apiKeyEndTxn:
+		return &EndTxnRequest{Version: version}
+	// 27: WriteTxnMarkersRequest
+	case apiKeyTxnOffsetCommit:
+		return &TxnOffsetCommitRequest{Version: version}
+	case apiKeyDescribeAcls:
+		return &DescribeAclsRequest{Version: int(version)}
+	case apiKeyCreateAcls:
+		return &CreateAclsRequest{Version: version}
+	case apiKeyDeleteAcls:
+		return &DeleteAclsRequest{Version: int(version)}
+	case apiKeyDescribeConfigs:
+		return &DescribeConfigsRequest{Version: version}
+	case apiKeyAlterConfigs:
+		return &AlterConfigsRequest{Version: version}
+	// 34: AlterReplicaLogDirsRequest
+	case apiKeyDescribeLogDirs:
+		return &DescribeLogDirsRequest{Version: version}
+	case apiKeySASLAuth:
+		return &SaslAuthenticateRequest{Version: version}
+	case apiKeyCreatePartitions:
+		return &CreatePartitionsRequest{Version: version}
+	// 38: CreateDelegationTokenRequest
+	// 39: RenewDelegationTokenRequest
+	// 40: ExpireDelegationTokenRequest
+	// 41: DescribeDelegationTokenRequest
+	case apiKeyDeleteGroups:
+		return &DeleteGroupsRequest{Version: version}
+	case apiKeyElectLeaders:
+		return &ElectLeadersRequest{Version: version}
+	case apiKeyIncrementalAlterConfigs:
+		return &IncrementalAlterConfigsRequest{Version: version}
+	case apiKeyAlterPartitionReassignments:
+		return &AlterPartitionReassignmentsRequest{Version: version}
+	case apiKeyListPartitionReassignments:
+		return &ListPartitionReassignmentsRequest{Version: version}
+	case apiKeyOffsetDelete:
+		return &DeleteOffsetsRequest{Version: version}
+	case apiKeyDescribeClientQuotas:
+		return &DescribeClientQuotasRequest{Version: version}
+	case apiKeyAlterClientQuotas:
+		return &AlterClientQuotasRequest{Version: version}
+	case apiKeyDescribeUserScramCredentials:
+		return &DescribeUserScramCredentialsRequest{Version: version}
+	case apiKeyAlterUserScramCredentials:
+		return &AlterUserScramCredentialsRequest{Version: version}
+		// 52: VoteRequest
+		// 53: BeginQuorumEpochRequest
+		// 54: EndQuorumEpochRequest
+		// 55: DescribeQuorumRequest
+		// 56: AlterPartitionRequest
+		// 57: UpdateFeaturesRequest
+		// 58: EnvelopeRequest
+		// 59: FetchSnapshotRequest
+		// 60: DescribeClusterRequest
+		// 61: DescribeProducersRequest
+		// 62: BrokerRegistrationRequest
+		// 63: BrokerHeartbeatRequest
+		// 64: UnregisterBrokerRequest
+		// 65: DescribeTransactionsRequest
+		// 66: ListTransactionsRequest
+		// 67: AllocateProducerIdsRequest
+		// 68: ConsumerGroupHeartbeatRequest
+	}
+	return nil
+}
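
decodeRequest expects the standard Kafka framing: a 4-byte big-endian length prefix followed by that many bytes of request header and body, which are then handed to request.decode and allocateBody. A within-package sketch of draining one request from a connection (net.Conn satisfies io.Reader; the "net" import is assumed, error handling is trimmed and the helper name is illustrative):

	func readOneRequest(conn net.Conn) (*request, error) {
		req, n, err := decodeRequest(conn)
		if err != nil {
			return nil, err
		}
		Logger.Printf("decoded %T request (%d bytes read)", req.body, n)
		return req, nil
	}
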
diff --git a/vendor/github.com/IBM/sarama/response_header.go b/vendor/github.com/IBM/sarama/response_header.go
new file mode 100644
index 0000000..88b0b44
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/response_header.go
@@ -0,0 +1,33 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+	length        int32
+	correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		if decoder, ok := pd.(*realDecoder); ok {
+			pd = &realFlexibleDecoder{decoder}
+		} else {
+			return PacketDecodingError{"failed to instantiate flexible decoder"}
+		}
+	}
+	r.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if r.length <= 4 || r.length > MaxResponseSize {
+		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+	}
+
+	r.correlationID, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
diff --git a/vendor/github.com/IBM/sarama/sarama.go b/vendor/github.com/IBM/sarama/sarama.go
new file mode 100644
index 0000000..4d5f60a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sarama.go
@@ -0,0 +1,139 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer or ConsumerGroup APIs.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through the https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+	+---------------------------------------------------------+------------+---------------------------------------------------------------+
+	| Name                                                    | Type       | Description                                                   |
+	+---------------------------------------------------------+------------+---------------------------------------------------------------+
+	| incoming-byte-rate                                      | meter      | Bytes/second read off all brokers                             |
+	| incoming-byte-rate-for-broker-<broker-id>               | meter      | Bytes/second read off a given broker                          |
+	| outgoing-byte-rate                                      | meter      | Bytes/second written off all brokers                          |
+	| outgoing-byte-rate-for-broker-<broker-id>               | meter      | Bytes/second written off a given broker                       |
+	| request-rate                                            | meter      | Requests/second sent to all brokers                           |
+	| request-rate-for-broker-<broker-id>                     | meter      | Requests/second sent to a given broker                        |
+	| request-size                                            | histogram  | Distribution of the request size in bytes for all brokers     |
+	| request-size-for-broker-<broker-id>                     | histogram  | Distribution of the request size in bytes for a given broker  |
+	| request-latency-in-ms                                   | histogram  | Distribution of the request latency in ms for all brokers     |
+	| request-latency-in-ms-for-broker-<broker-id>            | histogram  | Distribution of the request latency in ms for a given broker  |
+	| response-rate                                           | meter      | Responses/second received from all brokers                    |
+	| response-rate-for-broker-<broker-id>                    | meter      | Responses/second received from a given broker                 |
+	| response-size                                           | histogram  | Distribution of the response size in bytes for all brokers    |
+	| response-size-for-broker-<broker-id>                    | histogram  | Distribution of the response size in bytes for a given broker |
+	| requests-in-flight                                      | counter    | The current number of in-flight requests awaiting a response  |
+	|                                                         |            | for all brokers                                               |
+	| requests-in-flight-for-broker-<broker-id>               | counter    | The current number of in-flight requests awaiting a response  |
+	|                                                         |            | for a given broker                                            |
+	| protocol-requests-rate-<api-key>                        | meter      | Number of api requests sent to the brokers for all brokers    |
+	|                                                         |            | https://kafka.apache.org/protocol.html#protocol_api_keys      |
+	| protocol-requests-rate-<api-key>-for-broker-<broker-id> | meter      | Number of packets sent to the brokers by api-key for a given  |
+	|                                                         |            | broker                                                        |
+	+---------------------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
+
+Producer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| batch-size                                | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
+	| batch-size-for-topic-<topic>              | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
+	| record-send-rate                          | meter      | Records/second sent to all topics                                                    |
+	| record-send-rate-for-topic-<topic>        | meter      | Records/second sent to a given topic                                                 |
+	| records-per-request                       | histogram  | Distribution of the number of records sent per request for all topics                |
+	| records-per-request-for-topic-<topic>     | histogram  | Distribution of the number of records sent per request for a given topic             |
+	| compression-ratio                         | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
+	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+Consumer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| consumer-batch-size                       | histogram  | Distribution of the number of messages in a batch                                    |
+	| consumer-fetch-rate                       | meter      | Fetch requests/second sent to all brokers                                            |
+	| consumer-fetch-rate-for-broker-<broker>   | meter      | Fetch requests/second sent to a given broker                                         |
+	| consumer-fetch-rate-for-topic-<topic>     | meter      | Fetch requests/second sent for a given topic                                         |
+	| consumer-fetch-response-size              | histogram  | Distribution of the fetch response size in bytes                                     |
+	| consumer-group-join-total-<GroupID>       | counter    | Total count of consumer group join attempts                                          |
+	| consumer-group-join-failed-<GroupID>      | counter    | Total count of consumer group join failures                                          |
+	| consumer-group-sync-total-<GroupID>       | counter    | Total count of consumer group sync attempts                                          |
+	| consumer-group-sync-failed-<GroupID>      | counter    | Total count of consumer group sync failures                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+*/
+package sarama
+
+import (
+	"io"
+	"log"
+)
+
+var (
+	// Logger is the instance of a StdLogger interface that Sarama writes connection
+	// management events to. By default it is set to discard all log messages via io.Discard,
+	// but you can set it to redirect wherever you want.
+	Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags)
+
+	// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+	// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+	PanicHandler func(interface{})
+
+	// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+	// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+	// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+	// to process.
+	MaxRequestSize int32 = 100 * 1024 * 1024
+
+	// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+	// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+	// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+	// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+	// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+	MaxResponseSize int32 = 100 * 1024 * 1024
+)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
+
+type debugLogger struct{}
+
+func (d *debugLogger) Print(v ...interface{}) {
+	Logger.Print(v...)
+}
+func (d *debugLogger) Printf(format string, v ...interface{}) {
+	Logger.Printf(format, v...)
+}
+func (d *debugLogger) Println(v ...interface{}) {
+	Logger.Println(v...)
+}
+
+// DebugLogger is the instance of a StdLogger that Sarama writes more verbose
+// debug information to. By default it is set to redirect all debug output to the
+// default Logger above, but you can optionally set it to another StdLogger
+// instance (e.g., one that discards debug information).
+var DebugLogger StdLogger = &debugLogger{}
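
As a concrete companion to the package documentation above, a minimal AsyncProducer sketch (broker address and topic are placeholders; the error channel must be drained, or Producer.Return.Errors disabled, to keep the producer from blocking):

	config := NewConfig()
	config.Producer.Return.Errors = true
	producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.AsyncClose()

	// Drain errors in the background so sends never stall.
	go func() {
		for perr := range producer.Errors() {
			log.Println("produce failed:", perr.Err)
		}
	}()

	producer.Input() <- &ProducerMessage{Topic: "example-topic", Value: StringEncoder("hello")}
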
diff --git a/vendor/github.com/IBM/sarama/sasl_authenticate_request.go b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go
new file mode 100644
index 0000000..f53a991
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go
@@ -0,0 +1,46 @@
+package sarama
+
+type SaslAuthenticateRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version       int16
+	SaslAuthBytes []byte
+}
+
+func (r *SaslAuthenticateRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error {
+	return pe.putBytes(r.SaslAuthBytes)
+}
+
+func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	r.SaslAuthBytes, err = pd.getBytes()
+	return err
+}
+
+func (r *SaslAuthenticateRequest) key() int16 {
+	return apiKeySASLAuth
+}
+
+func (r *SaslAuthenticateRequest) version() int16 {
+	return r.Version
+}
+
+func (r *SaslAuthenticateRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *SaslAuthenticateRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V2_2_0_0
+	default:
+		return V1_0_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/sasl_authenticate_response.go b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go
new file mode 100644
index 0000000..60752e8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go
@@ -0,0 +1,75 @@
+package sarama
+
+type SaslAuthenticateResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version           int16
+	Err               KError
+	ErrorMessage      *string
+	SaslAuthBytes     []byte
+	SessionLifetimeMs int64
+}
+
+func (r *SaslAuthenticateResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error {
+	pe.putKError(r.Err)
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+	if err := pe.putBytes(r.SaslAuthBytes); err != nil {
+		return err
+	}
+	if r.Version > 0 {
+		pe.putInt64(r.SessionLifetimeMs)
+	}
+	return nil
+}
+
+func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if r.SaslAuthBytes, err = pd.getBytes(); err != nil {
+		return err
+	}
+
+	if version > 0 {
+		r.SessionLifetimeMs, err = pd.getInt64()
+	}
+
+	return err
+}
+
+func (r *SaslAuthenticateResponse) key() int16 {
+	return apiKeySASLAuth
+}
+
+func (r *SaslAuthenticateResponse) version() int16 {
+	return r.Version
+}
+
+func (r *SaslAuthenticateResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *SaslAuthenticateResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V2_2_0_0
+	default:
+		return V1_0_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/sasl_handshake_request.go b/vendor/github.com/IBM/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000..cae8536
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_handshake_request.go
@@ -0,0 +1,51 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+	Mechanism string
+	Version   int16
+}
+
+func (r *SaslHandshakeRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Mechanism); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.Mechanism, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+	return apiKeySaslHandshake
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+	return r.Version
+}
+
+func (r *SaslHandshakeRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *SaslHandshakeRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V1_0_0_0
+	default:
+		return V0_10_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/sasl_handshake_response.go b/vendor/github.com/IBM/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000..3974589
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_handshake_response.go
@@ -0,0 +1,54 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+	Version           int16
+	Err               KError
+	EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+	pe.putKError(r.Err)
+	return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+	return apiKeySaslHandshake
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+	return r.Version
+}
+
+func (r *SaslHandshakeResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V1_0_0_0
+	default:
+		return V0_10_0_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/IBM/sarama/scram_formatter.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/scram_formatter.go
rename to vendor/github.com/IBM/sarama/scram_formatter.go
diff --git a/vendor/github.com/IBM/sarama/server.properties b/vendor/github.com/IBM/sarama/server.properties
new file mode 100644
index 0000000..21ba1c7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/server.properties
@@ -0,0 +1,138 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required.
+# See kafka.server.KafkaConfig for additional details and defaults
+#
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. If not configured, the host name will be equal to the value of
+# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
+#   FORMAT:
+#     listeners = listener_name://host_name:port
+#   EXAMPLE:
+#     listeners = PLAINTEXT://your.host.name:9092
+#listeners=PLAINTEXT://:9092
+
+# Listener name, hostname and port the broker will advertise to clients.
+# If not set, it uses the value for "listeners".
+#advertised.listeners=PLAINTEXT://your.host.name:9092
+
+# Maps listener names to security protocols; the default is for them to be the same. See the config documentation for more details.
+#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/tmp/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# Increasing this value is recommended for installations with data dirs located in a RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings  #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
+offsets.topic.replication.factor=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+#log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated list of host:port pairs, each corresponding to a zk
+# server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:2181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=18000
+
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/IBM/sarama/sticky_assignor_user_data.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
rename to vendor/github.com/IBM/sarama/sticky_assignor_user_data.go
diff --git a/vendor/github.com/IBM/sarama/sync_group_request.go b/vendor/github.com/IBM/sarama/sync_group_request.go
new file mode 100644
index 0000000..b109cd9
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_group_request.go
@@ -0,0 +1,184 @@
+package sarama
+
+type SyncGroupRequestAssignment struct {
+	// MemberId contains the ID of the member to assign.
+	MemberId string
+	// Assignment contains the member assignment.
+	Assignment []byte
+}
+
+func (a *SyncGroupRequestAssignment) encode(pe packetEncoder, version int16) (err error) {
+	if err := pe.putString(a.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putBytes(a.Assignment); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *SyncGroupRequestAssignment) decode(pd packetDecoder, version int16) (err error) {
+	if a.MemberId, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if a.Assignment, err = pd.getBytes(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type SyncGroupRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// GroupId contains the unique group identifier.
+	GroupId string
+	// GenerationId contains the generation of the group.
+	GenerationId int32
+	// MemberId contains the member ID assigned by the group.
+	MemberId string
+	// GroupInstanceId contains the unique identifier of the consumer instance provided by the end user.
+	GroupInstanceId *string
+	// GroupAssignments contains each assignment.
+	GroupAssignments []SyncGroupRequestAssignment
+}
+
+func (s *SyncGroupRequest) setVersion(v int16) {
+	s.Version = v
+}
+
+func (s *SyncGroupRequest) encode(pe packetEncoder) (err error) {
+	if err := pe.putString(s.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(s.GenerationId)
+
+	if err := pe.putString(s.MemberId); err != nil {
+		return err
+	}
+
+	if s.Version >= 3 {
+		if err := pe.putNullableString(s.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putArrayLength(len(s.GroupAssignments)); err != nil {
+		return err
+	}
+	for _, block := range s.GroupAssignments {
+		if err := block.encode(pe, s.Version); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (s *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	s.Version = version
+	if s.GroupId, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if s.GenerationId, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if s.MemberId, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if s.Version >= 3 {
+		if s.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	if numAssignments, err := pd.getArrayLength(); err != nil {
+		return err
+	} else if numAssignments > 0 {
+		s.GroupAssignments = make([]SyncGroupRequestAssignment, numAssignments)
+		for i := 0; i < numAssignments; i++ {
+			var block SyncGroupRequestAssignment
+			if err := block.decode(pd, s.Version); err != nil {
+				return err
+			}
+			s.GroupAssignments[i] = block
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *SyncGroupRequest) key() int16 {
+	return apiKeySyncGroup
+}
+
+func (r *SyncGroupRequest) version() int16 {
+	return r.Version
+}
+
+func (r *SyncGroupRequest) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 2
+	}
+	return 1
+}
+
+func (r *SyncGroupRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *SyncGroupRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *SyncGroupRequest) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+	r.GroupAssignments = append(r.GroupAssignments, SyncGroupRequestAssignment{
+		MemberId:   memberId,
+		Assignment: memberAssignment,
+	})
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(
+	memberId string,
+	memberAssignment *ConsumerGroupMemberAssignment,
+) error {
+	bin, err := encode(memberAssignment, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupAssignment(memberId, bin)
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/sync_group_response.go b/vendor/github.com/IBM/sarama/sync_group_response.go
new file mode 100644
index 0000000..a605ced
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_group_response.go
@@ -0,0 +1,108 @@
+package sarama
+
+import "time"
+
+type SyncGroupResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTime contains the duration in milliseconds for which the
+	// request was throttled due to a quota violation, or zero if the request
+	// did not violate any quota.
+	ThrottleTime int32
+	// Err contains the error code, or 0 if there was no error.
+	Err KError
+	// MemberAssignment contains the member assignment.
+	MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(r.MemberAssignment, assignment, nil)
+	return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putKError(r.Err)
+	if err := pe.putBytes(r.MemberAssignment); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	r.MemberAssignment, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *SyncGroupResponse) key() int16 {
+	return apiKeySyncGroup
+}
+
+func (r *SyncGroupResponse) version() int16 {
+	return r.Version
+}
+
+func (r *SyncGroupResponse) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 1
+	}
+	return 0
+}
+
+func (r *SyncGroupResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *SyncGroupResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *SyncGroupResponse) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *SyncGroupResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go
new file mode 100644
index 0000000..f6876fb
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_producer.go
@@ -0,0 +1,209 @@
+package sarama
+
+import "sync"
+
+var expectationsPool = sync.Pool{
+	New: func() interface{} {
+		return make(chan *ProducerError, 1)
+	},
+}
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
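+//
+// An illustrative usage sketch (not authoritative; it assumes a broker at
+// localhost:9092 and a topic named "example"):
+//
+//	cfg := sarama.NewConfig()
+//	cfg.Producer.Return.Successes = true // required by SyncProducer
+//	cfg.Producer.Return.Errors = true    // required by SyncProducer
+//	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer producer.Close()
+//
+//	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+//		Topic: "example",
+//		Value: sarama.StringEncoder("hello"),
+//	})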
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It will return the partition and the offset
+	// of the produced message, or an error if the message failed to produce.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer; you must call this function before a producer
+	// object passes out of scope, as it may otherwise leak memory.
+	// You must call this before calling Close on the underlying client.
+	Close() error
+
+	// TxnStatus returns the current producer transaction status.
+	TxnStatus() ProducerTxnStatusFlag
+
+	// IsTransactional returns true when the current producer is transactional.
+	IsTransactional() bool
+
+	// BeginTxn begins a new transaction.
+	BeginTxn() error
+
+	// CommitTxn commits the current transaction.
+	CommitTxn() error
+
+	// AbortTxn aborts the current transaction.
+	AbortTxn() error
+
+	// AddOffsetsToTxn adds the associated offsets to the current transaction.
+	AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error
+
+	// AddMessageToTxn adds the message's offsets to the current transaction.
+	AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error
+}
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+	if config == nil {
+		config = NewConfig()
+		config.Producer.Return.Successes = true
+	}
+
+	if err := verifyProducerConfig(config); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducer(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+	if err := verifyProducerConfig(client.Config()); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+	sp := &syncProducer{producer: p}
+
+	sp.wg.Add(2)
+	go withRecover(sp.handleSuccesses)
+	go withRecover(sp.handleErrors)
+
+	return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+	if !config.Producer.Return.Errors {
+		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+	}
+	if !config.Producer.Return.Successes {
+		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+	}
+	return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+	expectation := expectationsPool.Get().(chan *ProducerError)
+	msg.expectation = expectation
+	sp.producer.Input() <- msg
+	pErr := <-expectation
+	msg.expectation = nil
+	expectationsPool.Put(expectation)
+	if pErr != nil {
+		return -1, -1, pErr.Err
+	}
+
+	return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+	indices := make(chan int, len(msgs))
+	go func() {
+		for i, msg := range msgs {
+			expectation := expectationsPool.Get().(chan *ProducerError)
+			msg.expectation = expectation
+			sp.producer.Input() <- msg
+			indices <- i
+		}
+		close(indices)
+	}()
+
+	var errors ProducerErrors
+	for i := range indices {
+		expectation := msgs[i].expectation
+		pErr := <-expectation
+		msgs[i].expectation = nil
+		expectationsPool.Put(expectation)
+		if pErr != nil {
+			errors = append(errors, pErr)
+		}
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+	defer sp.wg.Done()
+	for msg := range sp.producer.Successes() {
+		expectation := msg.expectation
+		expectation <- nil
+	}
+}
+
+func (sp *syncProducer) handleErrors() {
+	defer sp.wg.Done()
+	for err := range sp.producer.Errors() {
+		expectation := err.Msg.expectation
+		expectation <- err
+	}
+}
+
+func (sp *syncProducer) Close() error {
+	sp.producer.AsyncClose()
+	sp.wg.Wait()
+	return nil
+}
+
+func (sp *syncProducer) IsTransactional() bool {
+	return sp.producer.IsTransactional()
+}
+
+func (sp *syncProducer) BeginTxn() error {
+	return sp.producer.BeginTxn()
+}
+
+func (sp *syncProducer) CommitTxn() error {
+	return sp.producer.CommitTxn()
+}
+
+func (sp *syncProducer) AbortTxn() error {
+	return sp.producer.AbortTxn()
+}
+
+func (sp *syncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error {
+	return sp.producer.AddOffsetsToTxn(offsets, groupId)
+}
+
+func (sp *syncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error {
+	return sp.producer.AddMessageToTxn(msg, groupId, metadata)
+}
+
+func (p *syncProducer) TxnStatus() ProducerTxnStatusFlag {
+	return p.producer.TxnStatus()
+}
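+
+// Illustrative transactional flow (a sketch only; it assumes a configuration
+// with Producer.Idempotent = true and a non-empty Producer.Transaction.ID, in
+// addition to the usual SyncProducer requirements):
+//
+//	if err := producer.BeginTxn(); err != nil {
+//		return err
+//	}
+//	if _, _, err := producer.SendMessage(msg); err != nil {
+//		_ = producer.AbortTxn()
+//		return err
+//	}
+//	return producer.CommitTxn()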
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/IBM/sarama/timestamp.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/timestamp.go
rename to vendor/github.com/IBM/sarama/timestamp.go
diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go
new file mode 100644
index 0000000..bf20b75
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/transaction_manager.go
@@ -0,0 +1,930 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// ProducerTxnStatusFlag marks the current transaction status.
+type ProducerTxnStatusFlag int16
+
+const (
+	// ProducerTxnFlagUninitialized when txnmgr is created
+	ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota
+	// ProducerTxnFlagInitializing when txnmgr is initializing
+	ProducerTxnFlagInitializing
+	// ProducerTxnFlagReady when txnmgr is ready to receive a transaction
+	ProducerTxnFlagReady
+	// ProducerTxnFlagInTransaction when transaction is started
+	ProducerTxnFlagInTransaction
+	// ProducerTxnFlagEndTransaction when transaction will be committed
+	ProducerTxnFlagEndTransaction
+	// ProducerTxnFlagInError when having abortable or fatal error
+	ProducerTxnFlagInError
+	// ProducerTxnFlagCommittingTransaction when committing txn
+	ProducerTxnFlagCommittingTransaction
+	// ProducerTxnFlagAbortingTransaction when aborting txn
+	ProducerTxnFlagAbortingTransaction
+	// ProducerTxnFlagAbortableError when the producer encounters an abortable error
+	// Must call AbortTxn in this case.
+	ProducerTxnFlagAbortableError
+	// ProducerTxnFlagFatalError when the producer encounters a fatal error.
+	// Must Close and recreate it.
+	ProducerTxnFlagFatalError
+)
+
+func (s ProducerTxnStatusFlag) String() string {
+	status := make([]string, 0)
+	if s&ProducerTxnFlagUninitialized != 0 {
+		status = append(status, "ProducerTxnStateUninitialized")
+	}
+	if s&ProducerTxnFlagInitializing != 0 {
+		status = append(status, "ProducerTxnStateInitializing")
+	}
+	if s&ProducerTxnFlagReady != 0 {
+		status = append(status, "ProducerTxnStateReady")
+	}
+	if s&ProducerTxnFlagInTransaction != 0 {
+		status = append(status, "ProducerTxnStateInTransaction")
+	}
+	if s&ProducerTxnFlagEndTransaction != 0 {
+		status = append(status, "ProducerTxnStateEndTransaction")
+	}
+	if s&ProducerTxnFlagInError != 0 {
+		status = append(status, "ProducerTxnStateInError")
+	}
+	if s&ProducerTxnFlagCommittingTransaction != 0 {
+		status = append(status, "ProducerTxnStateCommittingTransaction")
+	}
+	if s&ProducerTxnFlagAbortingTransaction != 0 {
+		status = append(status, "ProducerTxnStateAbortingTransaction")
+	}
+	if s&ProducerTxnFlagAbortableError != 0 {
+		status = append(status, "ProducerTxnStateAbortableError")
+	}
+	if s&ProducerTxnFlagFatalError != 0 {
+		status = append(status, "ProducerTxnStateFatalError")
+	}
+	return strings.Join(status, "|")
+}
+
+// transactionManager keeps the state necessary to ensure idempotent production
+type transactionManager struct {
+	producerID         int64
+	producerEpoch      int16
+	sequenceNumbers    map[string]int32
+	mutex              sync.Mutex
+	transactionalID    string
+	transactionTimeout time.Duration
+	client             Client
+
+	// true when the kafka cluster is at least 2.5.0;
+	// used to recover when the producer has failed.
+	coordinatorSupportsBumpingEpoch bool
+
+	// When the producer needs to bump its epoch.
+	epochBumpRequired bool
+	// Record last seen error.
+	lastError error
+
+	// Ensure that status is never accessed with a race-condition.
+	statusLock sync.RWMutex
+	status     ProducerTxnStatusFlag
+
+	// Ensure that only one goroutine will update partitions in current transaction.
+	partitionInTxnLock            sync.Mutex
+	pendingPartitionsInCurrentTxn topicPartitionSet
+	partitionsInCurrentTxn        topicPartitionSet
+
+	// Offsets to add to transaction.
+	offsetsInCurrentTxn map[string]topicPartitionOffsets
+}
+
+const (
+	noProducerID    = -1
+	noProducerEpoch = -1
+
+	// see publishTxnPartitions comment.
+	addPartitionsRetryBackoff = 20 * time.Millisecond
+)
+
+// txnmgr allowed transitions.
+var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{
+	ProducerTxnFlagUninitialized: {
+		ProducerTxnFlagReady,
+		ProducerTxnFlagInError,
+	},
+	// When we are initializing
+	ProducerTxnFlagInitializing: {
+		ProducerTxnFlagInitializing,
+		ProducerTxnFlagReady,
+		ProducerTxnFlagInError,
+	},
+	// When we have initialized transactional producer
+	ProducerTxnFlagReady: {
+		ProducerTxnFlagInTransaction,
+	},
+	// When beginTxn has been called
+	ProducerTxnFlagInTransaction: {
+		// When calling commit or abort
+		ProducerTxnFlagEndTransaction,
+		// When got an error
+		ProducerTxnFlagInError,
+	},
+	ProducerTxnFlagEndTransaction: {
+		// When epoch bump
+		ProducerTxnFlagInitializing,
+		// When commit is good
+		ProducerTxnFlagReady,
+		// When got an error
+		ProducerTxnFlagInError,
+	},
+	// Need to abort transaction
+	ProducerTxnFlagAbortableError: {
+		// Call AbortTxn
+		ProducerTxnFlagAbortingTransaction,
+		// When got an error
+		ProducerTxnFlagInError,
+	},
+	// Need to close producer
+	ProducerTxnFlagFatalError: {
+		ProducerTxnFlagFatalError,
+	},
+}
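+
+// For reference, a successful commit walks the transitions above roughly as
+// (illustrative, derived from the map above):
+//
+//	Uninitialized -> Ready -> InTransaction -> EndTransaction -> Ready
+//
+// with a detour through Initializing when an epoch bump is required.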
+
+type topicPartition struct {
+	topic     string
+	partition int32
+}
+
+// to ensure that we don't do a full scan every time a partition or an offset is added.
+type (
+	topicPartitionSet     map[topicPartition]struct{}
+	topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata
+)
+
+func (s topicPartitionSet) mapToRequest() map[string][]int32 {
+	result := make(map[string][]int32, len(s))
+	for tp := range s {
+		result[tp.topic] = append(result[tp.topic], tp.partition)
+	}
+	return result
+}
+
+func (s topicPartitionOffsets) mapToRequest() map[string][]*PartitionOffsetMetadata {
+	result := make(map[string][]*PartitionOffsetMetadata, len(s))
+	for tp, offset := range s {
+		result[tp.topic] = append(result[tp.topic], offset)
+	}
+	return result
+}
+
+// Return true if current transition is allowed.
+func (t *transactionManager) isTransitionValid(target ProducerTxnStatusFlag) bool {
+	for status, allowedTransitions := range producerTxnTransitions {
+		if status&t.status != 0 {
+			for _, allowedTransition := range allowedTransitions {
+				if allowedTransition&target != 0 {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// Get current transaction status.
+func (t *transactionManager) currentTxnStatus() ProducerTxnStatusFlag {
+	t.statusLock.RLock()
+	defer t.statusLock.RUnlock()
+
+	return t.status
+}
+
+// Try to transition to a valid status and return an error otherwise.
+func (t *transactionManager) transitionTo(target ProducerTxnStatusFlag, err error) error {
+	t.statusLock.Lock()
+	defer t.statusLock.Unlock()
+
+	if !t.isTransitionValid(target) {
+		return ErrTransitionNotAllowed
+	}
+
+	if target&ProducerTxnFlagInError != 0 {
+		if err == nil {
+			return ErrCannotTransitionNilError
+		}
+		t.lastError = err
+	} else {
+		t.lastError = nil
+	}
+
+	DebugLogger.Printf("txnmgr/transition [%s] transition from %s to %s\n", t.transactionalID, t.status, target)
+
+	t.status = target
+	return err
+}
+
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
+	key := fmt.Sprintf("%s-%d", topic, partition)
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	sequence := t.sequenceNumbers[key]
+	t.sequenceNumbers[key] = sequence + 1
+	return sequence, t.producerEpoch
+}
+
+func (t *transactionManager) bumpEpoch() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.producerEpoch++
+	for k := range t.sequenceNumbers {
+		t.sequenceNumbers[k] = 0
+	}
+}
+
+func (t *transactionManager) getProducerID() (int64, int16) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return t.producerID, t.producerEpoch
+}
+
+// Compute retry backoff considering the number of attempts remaining.
+func (t *transactionManager) computeBackoff(attemptsRemaining int) time.Duration {
+	if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil {
+		maxRetries := t.client.Config().Producer.Transaction.Retry.Max
+		retries := maxRetries - attemptsRemaining
+		return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries)
+	}
+	return t.client.Config().Producer.Transaction.Retry.Backoff
+}
+
+// return true if txnmgr is transactional.
+func (t *transactionManager) isTransactional() bool {
+	return t.transactionalID != ""
+}
+
+// add specified offsets to current transaction.
+func (t *transactionManager) addOffsetsToTxn(offsetsToAdd map[string][]*PartitionOffsetMetadata, groupId string) error {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	if t.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 {
+		return ErrTransactionNotReady
+	}
+
+	if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+		return t.lastError
+	}
+
+	if _, ok := t.offsetsInCurrentTxn[groupId]; !ok {
+		t.offsetsInCurrentTxn[groupId] = topicPartitionOffsets{}
+	}
+
+	for topic, offsets := range offsetsToAdd {
+		for _, offset := range offsets {
+			tp := topicPartition{topic: topic, partition: offset.Partition}
+			t.offsetsInCurrentTxn[groupId][tp] = offset
+		}
+	}
+	return nil
+}
+
+// send the txnmgr's saved offsets to the transaction coordinator.
+func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, groupId string) (topicPartitionOffsets, error) {
+	// First AddOffsetsToTxn
+	attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+	exec := func(run func() (bool, error), err error) error {
+		for attemptsRemaining >= 0 {
+			var retry bool
+			retry, err = run()
+			if !retry {
+				return err
+			}
+			backoff := t.computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/add-offset-to-txn [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+				t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return err
+	}
+	lastError := exec(func() (bool, error) {
+		coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+		if err != nil {
+			return true, err
+		}
+		request := &AddOffsetsToTxnRequest{
+			TransactionalID: t.transactionalID,
+			ProducerEpoch:   t.producerEpoch,
+			ProducerID:      t.producerID,
+			GroupID:         groupId,
+		}
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 2 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		response, err := coordinator.AddOffsetsToTxn(request)
+		if err != nil {
+			// If an error occurred try to refresh current transaction coordinator.
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			return true, err
+		}
+		if response == nil {
+			// If no response is returned just retry.
+			return true, ErrTxnUnableToParseResponse
+		}
+		if response.Err == ErrNoError {
+			DebugLogger.Printf("txnmgr/add-offset-to-txn [%s] successful add-offset-to-txn with group %s %+v\n",
+				t.transactionalID, groupId, response)
+			// If no error, just exit.
+			return false, nil
+		}
+		switch response.Err {
+		case ErrConsumerCoordinatorNotAvailable:
+			fallthrough
+		case ErrNotCoordinatorForConsumer:
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			fallthrough
+		case ErrOffsetsLoadInProgress:
+			fallthrough
+		case ErrConcurrentTransactions:
+			// Retry
+		case ErrUnknownProducerID:
+			fallthrough
+		case ErrInvalidProducerIDMapping:
+			return false, t.abortableErrorIfPossible(response.Err)
+		case ErrGroupAuthorizationFailed:
+			return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err)
+		default:
+			// Others are fatal
+			return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+		}
+		return true, response.Err
+	}, nil)
+
+	if lastError != nil {
+		return offsets, lastError
+	}
+
+	resultOffsets := offsets
+	// Then TxnOffsetCommit
+	// note the result is not completed until the TxnOffsetCommit returns
+	attemptsRemaining = t.client.Config().Producer.Transaction.Retry.Max
+	execTxnOffsetCommit := func(run func() (topicPartitionOffsets, bool, error), err error) (topicPartitionOffsets, error) {
+		var r topicPartitionOffsets
+		for attemptsRemaining >= 0 {
+			var retry bool
+			r, retry, err = run()
+			if !retry {
+				return r, err
+			}
+			backoff := t.computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/txn-offset-commit [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+				t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return r, err
+	}
+	return execTxnOffsetCommit(func() (topicPartitionOffsets, bool, error) {
+		consumerGroupCoordinator, err := t.client.Coordinator(groupId)
+		if err != nil {
+			return resultOffsets, true, err
+		}
+		request := &TxnOffsetCommitRequest{
+			TransactionalID: t.transactionalID,
+			ProducerEpoch:   t.producerEpoch,
+			ProducerID:      t.producerID,
+			GroupID:         groupId,
+			Topics:          offsets.mapToRequest(),
+		}
+		if t.client.Config().Version.IsAtLeast(V2_1_0_0) {
+			// Version 2 adds the committed leader epoch.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		responses, err := consumerGroupCoordinator.TxnOffsetCommit(request)
+		if err != nil {
+			_ = consumerGroupCoordinator.Close()
+			_ = t.client.RefreshCoordinator(groupId)
+			return resultOffsets, true, err
+		}
+
+		if responses == nil {
+			return resultOffsets, true, ErrTxnUnableToParseResponse
+		}
+
+		var responseErrors []error
+		failedTxn := topicPartitionOffsets{}
+		for topic, partitionErrors := range responses.Topics {
+			for _, partitionError := range partitionErrors {
+				switch partitionError.Err {
+				case ErrNoError:
+					continue
+				// If the topic is unknown or the coordinator is loading, retry with the current coordinator
+				case ErrRequestTimedOut:
+					fallthrough
+				case ErrConsumerCoordinatorNotAvailable:
+					fallthrough
+				case ErrNotCoordinatorForConsumer:
+					_ = consumerGroupCoordinator.Close()
+					_ = t.client.RefreshCoordinator(groupId)
+					fallthrough
+				case ErrUnknownTopicOrPartition:
+					fallthrough
+				case ErrOffsetsLoadInProgress:
+					// Do nothing just retry
+				case ErrIllegalGeneration:
+					fallthrough
+				case ErrUnknownMemberId:
+					fallthrough
+				case ErrFencedInstancedId:
+					fallthrough
+				case ErrGroupAuthorizationFailed:
+					return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, partitionError.Err)
+				default:
+					// Others are fatal
+					return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, partitionError.Err)
+				}
+				tp := topicPartition{topic: topic, partition: partitionError.Partition}
+				failedTxn[tp] = offsets[tp]
+				responseErrors = append(responseErrors, partitionError.Err)
+			}
+		}
+
+		resultOffsets = failedTxn
+
+		if len(resultOffsets) == 0 {
+			DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n",
+				t.transactionalID, groupId)
+			return resultOffsets, false, nil
+		}
+		return resultOffsets, true, Wrap(ErrTxnOffsetCommit, responseErrors...)
+	}, nil)
+}
+
+func (t *transactionManager) initProducerId() (int64, int16, error) {
+	isEpochBump := false
+
+	req := &InitProducerIDRequest{}
+	if t.isTransactional() {
+		req.TransactionalID = &t.transactionalID
+		req.TransactionTimeout = t.transactionTimeout
+	}
+
+	if t.client.Config().Version.IsAtLeast(V2_5_0_0) {
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 4 adds the support for new error code PRODUCER_FENCED.
+			req.Version = 4
+		} else {
+			// Version 3 adds ProducerId and ProducerEpoch, allowing producers to try
+			// to resume after an INVALID_PRODUCER_EPOCH error
+			req.Version = 3
+		}
+		isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch
+		t.coordinatorSupportsBumpingEpoch = true
+		req.ProducerID = t.producerID
+		req.ProducerEpoch = t.producerEpoch
+	} else if t.client.Config().Version.IsAtLeast(V2_4_0_0) {
+		// Version 2 is the first flexible version.
+		req.Version = 2
+	} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+		// Version 1 is the same as version 0.
+		req.Version = 1
+	}
+
+	if isEpochBump {
+		err := t.transitionTo(ProducerTxnFlagInitializing, nil)
+		if err != nil {
+			return -1, -1, err
+		}
+		DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId for the first time in order to acquire a producer ID\n",
+			t.transactionalID)
+	} else {
+		DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId with current producer ID %d and epoch %d in order to bump the epoch\n",
+			t.transactionalID, t.producerID, t.producerEpoch)
+	}
+
+	attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+	exec := func(run func() (int64, int16, bool, error), err error) (int64, int16, error) {
+		pid := int64(-1)
+		pepoch := int16(-1)
+		for attemptsRemaining >= 0 {
+			var retry bool
+			pid, pepoch, retry, err = run()
+			if !retry {
+				return pid, pepoch, err
+			}
+			backoff := t.computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/init-producer-id [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+				t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return -1, -1, err
+	}
+	return exec(func() (int64, int16, bool, error) {
+		var err error
+		var coordinator *Broker
+		if t.isTransactional() {
+			coordinator, err = t.client.TransactionCoordinator(t.transactionalID)
+		} else {
+			coordinator = t.client.LeastLoadedBroker()
+		}
+		if err != nil {
+			return -1, -1, true, err
+		}
+		response, err := coordinator.InitProducerID(req)
+		if err != nil {
+			if t.isTransactional() {
+				_ = coordinator.Close()
+				_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			}
+			return -1, -1, true, err
+		}
+		if response == nil {
+			return -1, -1, true, ErrTxnUnableToParseResponse
+		}
+		if response.Err == ErrNoError {
+			if isEpochBump {
+				t.sequenceNumbers = make(map[string]int32)
+			}
+			err := t.transitionTo(ProducerTxnFlagReady, nil)
+			if err != nil {
+				return -1, -1, true, err
+			}
+			DebugLogger.Printf("txnmgr/init-producer-id [%s] successful init producer id %+v\n",
+				t.transactionalID, response)
+			return response.ProducerID, response.ProducerEpoch, false, nil
+		}
+		switch response.Err {
+		// Retriable errors
+		case ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer, ErrOffsetsLoadInProgress:
+			if t.isTransactional() {
+				_ = coordinator.Close()
+				_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			}
+		// Fatal errors
+		default:
+			return -1, -1, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+		}
+		return -1, -1, true, response.Err
+	}, nil)
+}
+
+// if the kafka cluster is at least 2.5.0, mark txnmgr to bump the epoch; otherwise mark the error as fatal.
+func (t *transactionManager) abortableErrorIfPossible(err error) error {
+	if t.coordinatorSupportsBumpingEpoch {
+		t.epochBumpRequired = true
+		return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err)
+	}
+	return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err)
+}
+
+// End current transaction.
+func (t *transactionManager) completeTransaction() error {
+	if t.epochBumpRequired {
+		err := t.transitionTo(ProducerTxnFlagInitializing, nil)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := t.transitionTo(ProducerTxnFlagReady, nil)
+		if err != nil {
+			return err
+		}
+	}
+
+	t.lastError = nil
+	t.epochBumpRequired = false
+	t.partitionsInCurrentTxn = topicPartitionSet{}
+	t.pendingPartitionsInCurrentTxn = topicPartitionSet{}
+	t.offsetsInCurrentTxn = map[string]topicPartitionOffsets{}
+
+	return nil
+}
+
+// send an EndTxn request with the commit flag (true when committing, false otherwise).
+func (t *transactionManager) endTxn(commit bool) error {
+	attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+	exec := func(run func() (bool, error), err error) error {
+		for attemptsRemaining >= 0 {
+			var retry bool
+			retry, err = run()
+			if !retry {
+				return err
+			}
+			backoff := t.computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/endtxn [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+				t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return err
+	}
+	return exec(func() (bool, error) {
+		coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+		if err != nil {
+			return true, err
+		}
+		request := &EndTxnRequest{
+			TransactionalID:   t.transactionalID,
+			ProducerEpoch:     t.producerEpoch,
+			ProducerID:        t.producerID,
+			TransactionResult: commit,
+		}
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 2 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		response, err := coordinator.EndTxn(request)
+		if err != nil {
+			// Always retry on network error
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			return true, err
+		}
+		if response == nil {
+			return true, ErrTxnUnableToParseResponse
+		}
+		if response.Err == ErrNoError {
+			DebugLogger.Printf("txnmgr/endtxn [%s] successful to end txn %+v\n",
+				t.transactionalID, response)
+			return false, t.completeTransaction()
+		}
+		switch response.Err {
+		// Need to refresh coordinator
+		case ErrConsumerCoordinatorNotAvailable:
+			fallthrough
+		case ErrNotCoordinatorForConsumer:
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			fallthrough
+		case ErrOffsetsLoadInProgress:
+			fallthrough
+		case ErrConcurrentTransactions:
+			// Just retry
+		case ErrUnknownProducerID:
+			fallthrough
+		case ErrInvalidProducerIDMapping:
+			return false, t.abortableErrorIfPossible(response.Err)
+		// Fatal errors
+		default:
+			return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+		}
+		return true, response.Err
+	}, nil)
+}
+
+// We will try to publish the associated offsets for each group,
+// then send an EndTxn request to mark the transaction as finished.
+func (t *transactionManager) finishTransaction(commit bool) error {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	// Ensure no error when committing or aborting
+	if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+		return t.lastError
+	} else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+		return t.lastError
+	}
+
+	// if no records have been sent, don't do anything.
+	if len(t.partitionsInCurrentTxn) == 0 {
+		return t.completeTransaction()
+	}
+
+	epochBump := t.epochBumpRequired
+	// If we're aborting the transaction, there should be no need to add offsets.
+	if commit && len(t.offsetsInCurrentTxn) > 0 {
+		for group, offsets := range t.offsetsInCurrentTxn {
+			newOffsets, err := t.publishOffsetsToTxn(offsets, group)
+			if err != nil {
+				t.offsetsInCurrentTxn[group] = newOffsets
+				return err
+			}
+			delete(t.offsetsInCurrentTxn, group)
+		}
+	}
+
+	if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+		return t.lastError
+	}
+
+	if !errors.Is(t.lastError, ErrInvalidProducerIDMapping) {
+		err := t.endTxn(commit)
+		if err != nil {
+			return err
+		}
+		if !epochBump {
+			return nil
+		}
+	}
+	// reset pid and epoch if needed.
+	return t.initializeTransactions()
+}
+
+// called before sending any transactional record;
+// won't do anything if the current topic-partition has already been added to the transaction.
+func (t *transactionManager) maybeAddPartitionToCurrentTxn(topic string, partition int32) {
+	if t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+		return
+	}
+
+	tp := topicPartition{topic: topic, partition: partition}
+
+	t.partitionInTxnLock.Lock()
+	defer t.partitionInTxnLock.Unlock()
+	if _, ok := t.partitionsInCurrentTxn[tp]; ok {
+		// partition is already added
+		return
+	}
+
+	t.pendingPartitionsInCurrentTxn[tp] = struct{}{}
+}
+
+// Makes a request to kafka to add a list of partitions to the current transaction.
+func (t *transactionManager) publishTxnPartitions() error {
+	t.partitionInTxnLock.Lock()
+	defer t.partitionInTxnLock.Unlock()
+
+	if t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+		return t.lastError
+	}
+
+	if len(t.pendingPartitionsInCurrentTxn) == 0 {
+		return nil
+	}
+
+	// Remove the partitions from the pending set regardless of the result. We use the presence
+	// of partitions in the pending set to know when it is not safe to send batches. However, if
+	// the partitions failed to be added and we enter an error state, we expect the batches to be
+	// aborted anyway. In this case, we must be able to continue sending the batches which are in
+	// retry for partitions that were successfully added.
+	removeAllPartitionsOnFatalOrAbortedError := func() {
+		t.pendingPartitionsInCurrentTxn = topicPartitionSet{}
+	}
+
+	// We only want to reduce the backoff when retrying the first AddPartition which errored out due to a
+	// CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and
+	// we don't want to wait too long before trying to start the new one.
+	//
+	// This is only a temporary fix, the long term solution is being tracked in
+	// https://issues.apache.org/jira/browse/KAFKA-5482
+	retryBackoff := t.client.Config().Producer.Transaction.Retry.Backoff
+	computeBackoff := func(attemptsRemaining int) time.Duration {
+		if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil {
+			maxRetries := t.client.Config().Producer.Transaction.Retry.Max
+			retries := maxRetries - attemptsRemaining
+			return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries)
+		}
+		return retryBackoff
+	}
+	attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+
+	exec := func(run func() (bool, error), err error) error {
+		for attemptsRemaining >= 0 {
+			var retry bool
+			retry, err = run()
+			if !retry {
+				return err
+			}
+			backoff := computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/add-partition-to-txn retrying after %dms... (%d attempts remaining) (%s)\n", backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return err
+	}
+	return exec(func() (bool, error) {
+		coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+		if err != nil {
+			return true, err
+		}
+		request := &AddPartitionsToTxnRequest{
+			TransactionalID: t.transactionalID,
+			ProducerID:      t.producerID,
+			ProducerEpoch:   t.producerEpoch,
+			TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(),
+		}
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 2 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		addPartResponse, err := coordinator.AddPartitionsToTxn(request)
+		if err != nil {
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			return true, err
+		}
+
+		if addPartResponse == nil {
+			return true, ErrTxnUnableToParseResponse
+		}
+
+		// remove from the list partitions that have been successfully updated
+		var responseErrors []error
+		for topic, results := range addPartResponse.Errors {
+			for _, response := range results {
+				tp := topicPartition{topic: topic, partition: response.Partition}
+				switch response.Err {
+				case ErrNoError:
+					// Mark partition as added to transaction
+					t.partitionsInCurrentTxn[tp] = struct{}{}
+					delete(t.pendingPartitionsInCurrentTxn, tp)
+					continue
+				case ErrConsumerCoordinatorNotAvailable:
+					fallthrough
+				case ErrNotCoordinatorForConsumer:
+					_ = coordinator.Close()
+					_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+					fallthrough
+				case ErrUnknownTopicOrPartition:
+					fallthrough
+				case ErrOffsetsLoadInProgress:
+					// Retry topicPartition
+				case ErrConcurrentTransactions:
+					if len(t.partitionsInCurrentTxn) == 0 && retryBackoff > addPartitionsRetryBackoff {
+						retryBackoff = addPartitionsRetryBackoff
+					}
+				case ErrOperationNotAttempted:
+					fallthrough
+				case ErrTopicAuthorizationFailed:
+					removeAllPartitionsOnFatalOrAbortedError()
+					return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err)
+				case ErrUnknownProducerID:
+					fallthrough
+				case ErrInvalidProducerIDMapping:
+					removeAllPartitionsOnFatalOrAbortedError()
+					return false, t.abortableErrorIfPossible(response.Err)
+				// Fatal errors
+				default:
+					removeAllPartitionsOnFatalOrAbortedError()
+					return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+				}
+				responseErrors = append(responseErrors, response.Err)
+			}
+		}
+
+		// handle end
+		if len(t.pendingPartitionsInCurrentTxn) == 0 {
+			DebugLogger.Printf("txnmgr/add-partition-to-txn [%s] successful to add partitions txn %+v\n",
+				t.transactionalID, addPartResponse)
+			return false, nil
+		}
+		return true, Wrap(ErrAddPartitionsToTxn, responseErrors...)
+	}, nil)
+}
+
+// Build a new transaction manager sharing producer client.
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
+	txnmgr := &transactionManager{
+		producerID:                    noProducerID,
+		producerEpoch:                 noProducerEpoch,
+		client:                        client,
+		pendingPartitionsInCurrentTxn: topicPartitionSet{},
+		partitionsInCurrentTxn:        topicPartitionSet{},
+		offsetsInCurrentTxn:           make(map[string]topicPartitionOffsets),
+		status:                        ProducerTxnFlagUninitialized,
+	}
+
+	if conf.Producer.Idempotent {
+		txnmgr.transactionalID = conf.Producer.Transaction.ID
+		txnmgr.transactionTimeout = conf.Producer.Transaction.Timeout
+		txnmgr.sequenceNumbers = make(map[string]int32)
+		txnmgr.mutex = sync.Mutex{}
+
+		var err error
+		txnmgr.producerID, txnmgr.producerEpoch, err = txnmgr.initProducerId()
+		if err != nil {
+			return nil, err
+		}
+		Logger.Printf("txnmgr/init-producer-id [%s] obtained a ProducerId: %d and ProducerEpoch: %d\n",
+			txnmgr.transactionalID, txnmgr.producerID, txnmgr.producerEpoch)
+	}
+
+	return txnmgr, nil
+}
+
+// re-init producer-id and producer-epoch if needed.
+func (t *transactionManager) initializeTransactions() (err error) {
+	t.producerID, t.producerEpoch, err = t.initProducerId()
+	return
+}
diff --git a/vendor/github.com/IBM/sarama/txn_offset_commit_request.go b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go
new file mode 100644
index 0000000..f390172
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go
@@ -0,0 +1,166 @@
+package sarama
+
+type TxnOffsetCommitRequest struct {
+	Version         int16
+	TransactionalID string
+	GroupID         string
+	ProducerID      int64
+	ProducerEpoch   int16
+	Topics          map[string][]*PartitionOffsetMetadata
+}
+
+func (t *TxnOffsetCommitRequest) setVersion(v int16) {
+	t.Version = v
+}
+
+func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(t.TransactionalID); err != nil {
+		return err
+	}
+	if err := pe.putString(t.GroupID); err != nil {
+		return err
+	}
+	pe.putInt64(t.ProducerID)
+	pe.putInt16(t.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+	for topic, partitions := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for _, partition := range partitions {
+			if err := partition.encode(pe, t.Version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	t.Version = version
+	if t.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if t.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionOffsetMetadata)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
+
+		for j := 0; j < m; j++ {
+			partitionOffsetMetadata := new(PartitionOffsetMetadata)
+			if err := partitionOffsetMetadata.decode(pd, version); err != nil {
+				return err
+			}
+			t.Topics[topic][j] = partitionOffsetMetadata
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitRequest) key() int16 {
+	return apiKeyTxnOffsetCommit
+}
+
+func (a *TxnOffsetCommitRequest) version() int16 {
+	return a.Version
+}
+
+func (a *TxnOffsetCommitRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *TxnOffsetCommitRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_1_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+type PartitionOffsetMetadata struct {
+	// Partition contains the index of the partition within the topic.
+	Partition int32
+	// Offset contains the message offset to be committed.
+	Offset int64
+	// LeaderEpoch contains the leader epoch of the last consumed record.
+	LeaderEpoch int32
+	// Metadata contains any associated metadata the client wants to keep.
+	Metadata *string
+}
+
+func (p *PartitionOffsetMetadata) encode(pe packetEncoder, version int16) error {
+	pe.putInt32(p.Partition)
+	pe.putInt64(p.Offset)
+
+	if version >= 2 {
+		pe.putInt32(p.LeaderEpoch)
+	}
+
+	if err := pe.putNullableString(p.Metadata); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if p.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if version >= 2 {
+		if p.LeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if p.Metadata, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/txn_offset_commit_response.go b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go
new file mode 100644
index 0000000..19bcad3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+import (
+	"time"
+)
+
+type TxnOffsetCommitResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Topics       map[string][]*PartitionError
+}
+
+func (t *TxnOffsetCommitResponse) setVersion(v int16) {
+	t.Version = v
+}
+
+func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(t.ThrottleTime)
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+
+	for topic, e := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	t.Version = version
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			t.Topics[topic][j] = new(PartitionError)
+			if err := t.Topics[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitResponse) key() int16 {
+	return apiKeyTxnOffsetCommit
+}
+
+func (a *TxnOffsetCommitResponse) version() int16 {
+	return a.Version
+}
+
+func (a *TxnOffsetCommitResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *TxnOffsetCommitResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_1_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+func (r *TxnOffsetCommitResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go
new file mode 100644
index 0000000..9b87cb8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/utils.go
@@ -0,0 +1,393 @@
+package sarama
+
+import (
+	"bufio"
+	"fmt"
+	"math/rand"
+	"net"
+	"regexp"
+	"time"
+)
+
+const (
+	defaultRetryBackoff    = 100 * time.Millisecond
+	defaultRetryMaxBackoff = 1000 * time.Millisecond
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+	return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+	return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupInt32Slice(input []int32) []int32 {
+	ret := make([]int32, 0, len(input))
+	ret = append(ret, input...)
+	return ret
+}
+
+func withRecover(fn func()) {
+	defer func() {
+		handler := PanicHandler
+		if handler != nil {
+			if err := recover(); err != nil {
+				handler(err)
+			}
+		}
+	}()
+
+	fn()
+}
+
+func safeAsyncClose(b *Broker) {
+	go withRecover(func() {
+		if connected, _ := b.Connected(); connected {
+			if err := b.Close(); err != nil {
+				Logger.Println("Error closing broker", b.ID(), ":", err)
+			}
+		}
+	})
+}
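+
+// Example (illustrative sketch): installing a PanicHandler so that panics recovered by
+// withRecover in sarama-managed goroutines are reported through your own logging rather
+// than propagating and crashing the process. The log package import is assumed.
+//
+//	sarama.PanicHandler = func(r interface{}) {
+//		log.Printf("sarama goroutine panicked: %v", r)
+//	}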
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+	Encode() ([]byte, error)
+	Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+	return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+	return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
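+
+// Example (illustrative sketch): StringEncoder and ByteEncoder are typically used as the
+// Key and Value of a ProducerMessage. The producer variable is assumed to be a
+// SyncProducer constructed elsewhere.
+//
+//	msg := &sarama.ProducerMessage{
+//		Topic: "example-topic",
+//		Key:   sarama.StringEncoder("example-key"),
+//		Value: sarama.ByteEncoder([]byte("example-value")),
+//	}
+//	partition, offset, err := producer.SendMessage(msg)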
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//
+//	V1.IsAtLeast(V2) // false
+//	V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Effective constants defining the supported kafka versions.
+var (
+	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
+	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
+	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
+	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
+	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
+	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
+	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
+	V0_10_2_2 = newKafkaVersion(0, 10, 2, 2)
+	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
+	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
+	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
+	V1_0_1_0  = newKafkaVersion(1, 0, 1, 0)
+	V1_0_2_0  = newKafkaVersion(1, 0, 2, 0)
+	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
+	V1_1_1_0  = newKafkaVersion(1, 1, 1, 0)
+	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
+	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
+	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
+	V2_1_1_0  = newKafkaVersion(2, 1, 1, 0)
+	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
+	V2_2_1_0  = newKafkaVersion(2, 2, 1, 0)
+	V2_2_2_0  = newKafkaVersion(2, 2, 2, 0)
+	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
+	V2_3_1_0  = newKafkaVersion(2, 3, 1, 0)
+	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
+	V2_4_1_0  = newKafkaVersion(2, 4, 1, 0)
+	V2_5_0_0  = newKafkaVersion(2, 5, 0, 0)
+	V2_5_1_0  = newKafkaVersion(2, 5, 1, 0)
+	V2_6_0_0  = newKafkaVersion(2, 6, 0, 0)
+	V2_6_1_0  = newKafkaVersion(2, 6, 1, 0)
+	V2_6_2_0  = newKafkaVersion(2, 6, 2, 0)
+	V2_6_3_0  = newKafkaVersion(2, 6, 3, 0)
+	V2_7_0_0  = newKafkaVersion(2, 7, 0, 0)
+	V2_7_1_0  = newKafkaVersion(2, 7, 1, 0)
+	V2_7_2_0  = newKafkaVersion(2, 7, 2, 0)
+	V2_8_0_0  = newKafkaVersion(2, 8, 0, 0)
+	V2_8_1_0  = newKafkaVersion(2, 8, 1, 0)
+	V2_8_2_0  = newKafkaVersion(2, 8, 2, 0)
+	V3_0_0_0  = newKafkaVersion(3, 0, 0, 0)
+	V3_0_1_0  = newKafkaVersion(3, 0, 1, 0)
+	V3_0_2_0  = newKafkaVersion(3, 0, 2, 0)
+	V3_1_0_0  = newKafkaVersion(3, 1, 0, 0)
+	V3_1_1_0  = newKafkaVersion(3, 1, 1, 0)
+	V3_1_2_0  = newKafkaVersion(3, 1, 2, 0)
+	V3_2_0_0  = newKafkaVersion(3, 2, 0, 0)
+	V3_2_1_0  = newKafkaVersion(3, 2, 1, 0)
+	V3_2_2_0  = newKafkaVersion(3, 2, 2, 0)
+	V3_2_3_0  = newKafkaVersion(3, 2, 3, 0)
+	V3_3_0_0  = newKafkaVersion(3, 3, 0, 0)
+	V3_3_1_0  = newKafkaVersion(3, 3, 1, 0)
+	V3_3_2_0  = newKafkaVersion(3, 3, 2, 0)
+	V3_4_0_0  = newKafkaVersion(3, 4, 0, 0)
+	V3_4_1_0  = newKafkaVersion(3, 4, 1, 0)
+	V3_5_0_0  = newKafkaVersion(3, 5, 0, 0)
+	V3_5_1_0  = newKafkaVersion(3, 5, 1, 0)
+	V3_5_2_0  = newKafkaVersion(3, 5, 2, 0)
+	V3_6_0_0  = newKafkaVersion(3, 6, 0, 0)
+	V3_6_1_0  = newKafkaVersion(3, 6, 1, 0)
+	V3_6_2_0  = newKafkaVersion(3, 6, 2, 0)
+	V3_7_0_0  = newKafkaVersion(3, 7, 0, 0)
+	V3_7_1_0  = newKafkaVersion(3, 7, 1, 0)
+	V3_7_2_0  = newKafkaVersion(3, 7, 2, 0)
+	V3_8_0_0  = newKafkaVersion(3, 8, 0, 0)
+	V3_8_1_0  = newKafkaVersion(3, 8, 1, 0)
+	V3_9_0_0  = newKafkaVersion(3, 9, 0, 0)
+	V3_9_1_0  = newKafkaVersion(3, 9, 1, 0)
+	V4_0_0_0  = newKafkaVersion(4, 0, 0, 0)
+	V4_1_0_0  = newKafkaVersion(4, 1, 0, 0)
+
+	SupportedVersions = []KafkaVersion{
+		V0_8_2_0,
+		V0_8_2_1,
+		V0_8_2_2,
+		V0_9_0_0,
+		V0_9_0_1,
+		V0_10_0_0,
+		V0_10_0_1,
+		V0_10_1_0,
+		V0_10_1_1,
+		V0_10_2_0,
+		V0_10_2_1,
+		V0_10_2_2,
+		V0_11_0_0,
+		V0_11_0_1,
+		V0_11_0_2,
+		V1_0_0_0,
+		V1_0_1_0,
+		V1_0_2_0,
+		V1_1_0_0,
+		V1_1_1_0,
+		V2_0_0_0,
+		V2_0_1_0,
+		V2_1_0_0,
+		V2_1_1_0,
+		V2_2_0_0,
+		V2_2_1_0,
+		V2_2_2_0,
+		V2_3_0_0,
+		V2_3_1_0,
+		V2_4_0_0,
+		V2_4_1_0,
+		V2_5_0_0,
+		V2_5_1_0,
+		V2_6_0_0,
+		V2_6_1_0,
+		V2_6_2_0,
+		V2_6_3_0,
+		V2_7_0_0,
+		V2_7_1_0,
+		V2_7_2_0,
+		V2_8_0_0,
+		V2_8_1_0,
+		V2_8_2_0,
+		V3_0_0_0,
+		V3_0_1_0,
+		V3_0_2_0,
+		V3_1_0_0,
+		V3_1_1_0,
+		V3_1_2_0,
+		V3_2_0_0,
+		V3_2_1_0,
+		V3_2_2_0,
+		V3_2_3_0,
+		V3_3_0_0,
+		V3_3_1_0,
+		V3_3_2_0,
+		V3_4_0_0,
+		V3_4_1_0,
+		V3_5_0_0,
+		V3_5_1_0,
+		V3_5_2_0,
+		V3_6_0_0,
+		V3_6_1_0,
+		V3_6_2_0,
+		V3_7_0_0,
+		V3_7_1_0,
+		V3_7_2_0,
+		V3_8_0_0,
+		V3_8_1_0,
+		V3_9_0_0,
+		V3_9_1_0,
+		V4_0_0_0,
+		V4_1_0_0,
+	}
+	MinVersion     = V0_8_2_0
+	MaxVersion     = V4_1_0_0
+	DefaultVersion = V2_1_0_0
+
+	// reduced set of protocol versions to matrix test
+	fvtRangeVersions = []KafkaVersion{
+		V0_8_2_2,
+		V0_10_2_2,
+		V1_0_2_0,
+		V1_1_1_0,
+		V2_0_1_0,
+		V2_2_2_0,
+		V2_4_1_0,
+		V2_6_3_0,
+		V2_8_2_0,
+		V3_1_2_0,
+		V3_3_2_0,
+		V3_6_2_0,
+	}
+)
+
+var (
+	// This regex validates that a string complies with the pre-Kafka 1.0.0 format for version strings, for example 0.11.0.3
+	validPreKafka1Version = regexp.MustCompile(`^0\.\d+\.\d+\.\d+$`)
+
+	// This regex validates that a string complies with the post-Kafka 1.0.0 format, for example 1.0.0
+	validPostKafka1Version = regexp.MustCompile(`^\d+\.\d+\.\d+$`)
+)
+
+// ParseKafkaVersion parses and returns a Kafka version, or an error, from a string.
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+	if len(s) < 5 {
+		return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
+	}
+	var major, minor, veryMinor, patch uint
+	var err error
+	if s[0] == '0' {
+		err = scanKafkaVersion(s, validPreKafka1Version, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+	} else {
+		err = scanKafkaVersion(s, validPostKafka1Version, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+	}
+	if err != nil {
+		return DefaultVersion, err
+	}
+	return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
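+
+// Example (illustrative sketch): parsing a version string and applying it to a config.
+// The version string shown is just an example value.
+//
+//	version, err := sarama.ParseKafkaVersion("3.6.0")
+//	if err != nil {
+//		// handle the error, or fall back to sarama.DefaultVersion
+//	}
+//	cfg := sarama.NewConfig()
+//	cfg.Version = version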
+
+func scanKafkaVersion(s string, pattern *regexp.Regexp, format string, v [3]*uint) error {
+	if !pattern.MatchString(s) {
+		return fmt.Errorf("invalid version `%s`", s)
+	}
+	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+	return err
+}
+
+func (v KafkaVersion) String() string {
+	if v.version[0] == 0 {
+		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+	}
+
+	return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+}
+
+// NewExponentialBackoff returns a function that implements an exponential backoff strategy with jitter.
+// It follows KIP-580, implementing the formula:
+// MIN(retry.backoff.max.ms, (retry.backoff.ms * 2**(failures - 1)) * random(0.8, 1.2))
+// This ensures retries start with `backoff` and exponentially increase until `maxBackoff`, with added jitter.
+// The behavior when `failures = 0` is not explicitly defined in KIP-580 and is left to implementation discretion.
+//
+// Example usage:
+//
+//	backoffFunc := sarama.NewExponentialBackoff(config.Producer.Retry.Backoff, 2*time.Second)
+//	config.Producer.Retry.BackoffFunc = backoffFunc
+func NewExponentialBackoff(backoff time.Duration, maxBackoff time.Duration) func(retries, maxRetries int) time.Duration {
+	if backoff <= 0 {
+		backoff = defaultRetryBackoff
+	}
+	if maxBackoff <= 0 {
+		maxBackoff = defaultRetryMaxBackoff
+	}
+
+	if backoff > maxBackoff {
+		Logger.Println("Warning: backoff is greater than maxBackoff, using maxBackoff instead.")
+		backoff = maxBackoff
+	}
+
+	return func(retries, maxRetries int) time.Duration {
+		if retries <= 0 {
+			return backoff
+		}
+
+		calculatedBackoff := backoff * time.Duration(1<<(retries-1))
+		jitter := 0.8 + 0.4*rand.Float64()
+		calculatedBackoff = time.Duration(float64(calculatedBackoff) * jitter)
+
+		return min(calculatedBackoff, maxBackoff)
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/version.go b/vendor/github.com/IBM/sarama/version.go
new file mode 100644
index 0000000..d3b9d53
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/version.go
@@ -0,0 +1,27 @@
+package sarama
+
+import (
+	"runtime/debug"
+	"sync"
+)
+
+var (
+	v     string
+	vOnce sync.Once
+)
+
+func version() string {
+	vOnce.Do(func() {
+		bi, ok := debug.ReadBuildInfo()
+		if ok {
+			v = bi.Main.Version
+		}
+		if v == "" || v == "(devel)" {
+			// If we can't read a Go module version then they're using a git
+			// clone or a vendored module, so all we can do is report "dev" for
+			// the version to make a valid ApiVersions request.
+			v = "dev"
+		}
+	})
+	return v
+}
diff --git a/vendor/github.com/IBM/sarama/zstd.go b/vendor/github.com/IBM/sarama/zstd.go
new file mode 100644
index 0000000..6073ce7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/zstd.go
@@ -0,0 +1,74 @@
+package sarama
+
+import (
+	"sync"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// zstdMaxBufferedEncoders is the maximum number of not-in-use zstd encoders.
+// If the pool of encoders is exhausted then new encoders will be created on the fly.
+const zstdMaxBufferedEncoders = 1
+
+type ZstdEncoderParams struct {
+	Level int
+}
+type ZstdDecoderParams struct {
+}
+
+var zstdDecMap sync.Map
+
+var zstdAvailableEncoders sync.Map
+
+func getZstdEncoderChannel(params ZstdEncoderParams) chan *zstd.Encoder {
+	if c, ok := zstdAvailableEncoders.Load(params); ok {
+		return c.(chan *zstd.Encoder)
+	}
+	c, _ := zstdAvailableEncoders.LoadOrStore(params, make(chan *zstd.Encoder, zstdMaxBufferedEncoders))
+	return c.(chan *zstd.Encoder)
+}
+
+func getZstdEncoder(params ZstdEncoderParams) *zstd.Encoder {
+	select {
+	case enc := <-getZstdEncoderChannel(params):
+		return enc
+	default:
+		encoderLevel := zstd.SpeedDefault
+		if params.Level != CompressionLevelDefault {
+			encoderLevel = zstd.EncoderLevelFromZstd(params.Level)
+		}
+		zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true),
+			zstd.WithEncoderLevel(encoderLevel),
+			zstd.WithEncoderConcurrency(1))
+		return zstdEnc
+	}
+}
+
+func releaseEncoder(params ZstdEncoderParams, enc *zstd.Encoder) {
+	select {
+	case getZstdEncoderChannel(params) <- enc:
+	default:
+	}
+}
+
+func getDecoder(params ZstdDecoderParams) *zstd.Decoder {
+	if ret, ok := zstdDecMap.Load(params); ok {
+		return ret.(*zstd.Decoder)
+	}
+	// It's possible to race and create multiple new readers.
+	// Only one will survive GC after use.
+	zstdDec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+	zstdDecMap.Store(params, zstdDec)
+	return zstdDec
+}
+
+func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) {
+	return getDecoder(params).DecodeAll(src, dst)
+}
+
+func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) {
+	enc := getZstdEncoder(params)
+	out := enc.EncodeAll(src, dst)
+	releaseEncoder(params, enc)
+	return out, nil
+}
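+
+// Example round trip using the helpers above (illustrative sketch; these are
+// package-internal functions and are shown only to document the expected usage).
+// payload is assumed to be a []byte of uncompressed data.
+//
+//	params := ZstdEncoderParams{Level: CompressionLevelDefault}
+//	compressed, _ := zstdCompress(params, nil, payload)
+//	original, err := zstdDecompress(ZstdDecoderParams{}, nil, compressed)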
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
deleted file mode 100644
index 2c9adc2..0000000
--- a/vendor/github.com/Shopify/sarama/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-*.test
-
-# Folders
-_obj
-_test
-.vagrant
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-coverage.txt
-profile.out
-
-simplest-uncommitted-msg-0.1-jar-with-dependencies.jar
diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml
deleted file mode 100644
index 09e5c46..0000000
--- a/vendor/github.com/Shopify/sarama/.golangci.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-run:
-  timeout: 5m
-  deadline: 10m
-
-linters-settings:
-  govet:
-    check-shadowing: false
-  golint:
-    min-confidence: 0
-  gocyclo:
-    min-complexity: 99
-  maligned:
-    suggest-new: true
-  dupl:
-    threshold: 100
-  goconst:
-    min-len: 2
-    min-occurrences: 3
-  misspell:
-    locale: US
-  goimports:
-    local-prefixes: github.com/Shopify/sarama
-  gocritic:
-    enabled-tags:
-      - diagnostic
-      - experimental
-      - opinionated
-      - performance
-      - style
-    disabled-checks:
-      - wrapperFunc
-      - ifElseChain
-  funlen:
-    lines: 300
-    statements: 300
-
-linters:
-  disable-all: true
-  enable:
-    - bodyclose
-    - deadcode
-    - depguard
-    - dogsled
-    # - dupl
-    - errcheck
-    - funlen
-    - gochecknoinits
-    # - goconst
-    # - gocritic
-    - gocyclo
-    - gofmt
-    - goimports
-    # - golint
-    - gosec
-    # - gosimple
-    - govet
-    # - ineffassign
-    # - misspell
-    # - nakedret
-    # - scopelint
-    - staticcheck
-    - structcheck
-    # - stylecheck
-    - typecheck
-    - unconvert
-    - unused
-    - varcheck
-    - whitespace
-
-issues:
-  exclude:
-    - "G404: Use of weak random number generator"
-  # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
-  max-same-issues: 0
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
deleted file mode 100644
index 59ccd1d..0000000
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ /dev/null
@@ -1,1019 +0,0 @@
-# Changelog
-
-#### Unreleased
-
-#### Version 1.28.0 (2021-02-15)
-
-**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
-
-- #1870 - @kvch - Update Kerberos library to latest major
-- #1876 - @bai - Update docs, reference pkg.go.dev
-- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
-- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
-- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
-- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
-- #1862 - @bai - Fix CI setenv permissions issues
-- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
-- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
-
-#### Version 1.27.2 (2020-10-21)
-
-# Improvements
-
-#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
-
-# Fixes
-
-#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
-
-#### Version 1.27.1 (2020-10-07)
-
-# Improvements
-
-#1775 - @d1egoaz - Adds a Producer Interceptor example
-#1781 - @justin-chen - Refresh brokers given list of seed brokers
-#1784 - @justin-chen - Add randomize seed broker method
-#1790 - @d1egoaz - remove example binary
-#1798 - @bai - Test against Go 1.15
-#1785 - @justin-chen - Add private method to Client interface to prevent implementation
-#1802 - @uvw - Support Go 1.13 error unwrapping
-
-# Fixes
-
-#1791 - @stanislavkozlovski - bump default version to 1.0.0
-
-#### Version 1.27.0 (2020-08-11)
-
-# Improvements
-
-#1466 - @rubenvp8510  - Expose kerberos fast negotiation configuration
-#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
-#1699 - @wclaeys  - Consumer group support for manually comitting offsets
-#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
-#1726 - @d1egoaz - Include zstd on the functional tests
-#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
-#1738 - @varun06 - fixed variable names that are named same as some std lib package names
-#1741 - @varun06 - updated zstd dependency to latest v1.10.10
-#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
-#1763 - @alrs - remove deprecated tls options from test
-#1769 - @bai - Add support for Kafka 2.6.0
-
-# Fixes
-
-#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
-#1744 - @alrs  - Fix isBalanced Function Signature
-
-#### Version 1.26.4 (2020-05-19)
-
-# Fixes
-
-- #1701 - @d1egoaz - Set server name only for the current broker
-- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
-
-#### Version 1.26.3 (2020-05-07)
-
-# Fixes
-
-- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
-
-#### Version 1.26.2 (2020-05-06)
-
-# ⚠️ Known Issues
-
-This release has been marked as not ready for production and may be unstable, please use v1.26.4.
-
-# Improvements
-
-- #1560 - @iyacontrol - add sync pool for gzip 1-9
-- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
-- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs
-- #1632 - @bai - Add support for Go 1.14
-- #1640 - @random-dwi - Feature/fix list partition reassignments
-- #1646 - @mimaison - Add DescribeLogDirs to admin client
-- #1667 - @bai - Add support for kafka 2.5.0
-
-# Fixes
-
-- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
-- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
-- #1602 - @d1egoaz - adds a note about consumer groups Consume method
-- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
-- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
-- #1614 - @alrs - produce_response.go: Remove Unused Functions
-- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
-- #1639 - @agriffaut - Handle errors with no message but error code
-- #1643 - @kzinglzy - fix `config.net.keepalive`
-- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
-- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
-- #1650 - @lavoiesl - Return the response error in heartbeatLoop
-- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
-- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
-
-#### Version 1.26.1 (2020-02-04)
-
-Improvements:
-- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
-- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
-- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
-- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
-
-Bug Fixes:
-- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
-- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
-
-#### Version 1.26.0 (2020-01-24)
-
-New Features:
-- Enable zstd compression
-  ([1574](https://github.com/Shopify/sarama/pull/1574),
-  [1582](https://github.com/Shopify/sarama/pull/1582))
-- Support headers in tools kafka-console-producer
-  ([1549](https://github.com/Shopify/sarama/pull/1549))
-
-Improvements:
-- Add SASL AuthIdentity to SASL frames (authzid)
-  ([1585](https://github.com/Shopify/sarama/pull/1585)).
-
-Bug Fixes:
-- Sending messages with ZStd compression enabled fails in multiple ways
-  ([1252](https://github.com/Shopify/sarama/issues/1252)).
-- Use the broker for any admin on BrokerConfig
-  ([1571](https://github.com/Shopify/sarama/pull/1571)).
-- Set DescribeConfigRequest Version field
-  ([1576](https://github.com/Shopify/sarama/pull/1576)).
-- ConsumerGroup flooding logs with client/metadata update req
-  ([1578](https://github.com/Shopify/sarama/pull/1578)).
-- MetadataRequest version in DescribeCluster
-  ([1580](https://github.com/Shopify/sarama/pull/1580)).
-- Fix deadlock in consumer group handleError
-  ([1581](https://github.com/Shopify/sarama/pull/1581))
-- Fill in the Fetch{Request,Response} protocol
-  ([1582](https://github.com/Shopify/sarama/pull/1582)).
-- Retry topic request on ControllerNotAvailable
-  ([1586](https://github.com/Shopify/sarama/pull/1586)).
-
-#### Version 1.25.0 (2020-01-13)
-
-New Features:
-- Support TLS protocol in kafka-producer-performance
-  ([1538](https://github.com/Shopify/sarama/pull/1538)).
-- Add support for kafka 2.4.0
-  ([1552](https://github.com/Shopify/sarama/pull/1552)).
-
-Improvements:
-- Allow the Consumer to disable auto-commit offsets
-  ([1164](https://github.com/Shopify/sarama/pull/1164)).
-- Produce records with consistent timestamps
-  ([1455](https://github.com/Shopify/sarama/pull/1455)).
-
-Bug Fixes:
-- Fix incorrect SetTopicMetadata name mentions
-  ([1534](https://github.com/Shopify/sarama/pull/1534)).
-- Fix client.tryRefreshMetadata Println
-  ([1535](https://github.com/Shopify/sarama/pull/1535)).
-- Fix panic on calling updateMetadata on closed client
-  ([1531](https://github.com/Shopify/sarama/pull/1531)).
-- Fix possible faulty metrics in TestFuncProducing
-  ([1545](https://github.com/Shopify/sarama/pull/1545)).
-
-#### Version 1.24.1 (2019-10-31)
-
-New Features:
-- Add DescribeLogDirs Request/Response pair
-  ([1520](https://github.com/Shopify/sarama/pull/1520)).
-
-Bug Fixes:
-- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
-  ([1518](https://github.com/Shopify/sarama/pull/1518)).
-- Fix issue with consumergroup not rebalancing when new partition is added
-  ([1525](https://github.com/Shopify/sarama/pull/1525)).
-- Ensure consistent use of read/write deadlines
-  ([1529](https://github.com/Shopify/sarama/pull/1529)).
-
-#### Version 1.24.0 (2019-10-09)
-
-New Features:
-- Add sticky partition assignor
-  ([1416](https://github.com/Shopify/sarama/pull/1416)).
-- Switch from cgo zstd package to pure Go implementation
-  ([1477](https://github.com/Shopify/sarama/pull/1477)).
-
-Improvements:
-- Allow creating ClusterAdmin from client
-  ([1415](https://github.com/Shopify/sarama/pull/1415)).
-- Set KafkaVersion in ListAcls method
-  ([1452](https://github.com/Shopify/sarama/pull/1452)).
-- Set request version in CreateACL ClusterAdmin method
-  ([1458](https://github.com/Shopify/sarama/pull/1458)).
-- Set request version in DeleteACL ClusterAdmin method
-  ([1461](https://github.com/Shopify/sarama/pull/1461)).
-- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
-  ([1464](https://github.com/Shopify/sarama/pull/1464)).
-- Remove direct usage of gofork
-  ([1465](https://github.com/Shopify/sarama/pull/1465)).
-- Add support for Go 1.13
-  ([1478](https://github.com/Shopify/sarama/pull/1478)).
-- Improve behavior of NewMockListAclsResponse
-  ([1481](https://github.com/Shopify/sarama/pull/1481)).
-
-Bug Fixes:
-- Fix race condition in consumergroup example
-  ([1434](https://github.com/Shopify/sarama/pull/1434)).
-- Fix brokerProducer goroutine leak
-  ([1442](https://github.com/Shopify/sarama/pull/1442)).
-- Use released version of lz4 library
-  ([1469](https://github.com/Shopify/sarama/pull/1469)).
-- Set correct version in MockDeleteTopicsResponse
-  ([1484](https://github.com/Shopify/sarama/pull/1484)).
-- Fix CLI help message typo
-  ([1494](https://github.com/Shopify/sarama/pull/1494)).
-
-Known Issues:
-- Please **don't** use Zstd, as it doesn't work right now.
-  See https://github.com/Shopify/sarama/issues/1252
-
-#### Version 1.23.1 (2019-07-22)
-
-Bug Fixes:
-- Fix fetch delete bug record
-  ([1425](https://github.com/Shopify/sarama/pull/1425)).
-- Handle SASL/OAUTHBEARER token rejection
-  ([1428](https://github.com/Shopify/sarama/pull/1428)).
-
-#### Version 1.23.0 (2019-07-02)
-
-New Features:
-- Add support for Kafka 2.3.0
-  ([1418](https://github.com/Shopify/sarama/pull/1418)).
-- Add support for ListConsumerGroupOffsets v2
-  ([1374](https://github.com/Shopify/sarama/pull/1374)).
-- Add support for DeleteConsumerGroup
-  ([1417](https://github.com/Shopify/sarama/pull/1417)).
-- Add support for SASLVersion configuration
-  ([1410](https://github.com/Shopify/sarama/pull/1410)).
-- Add kerberos support
-  ([1366](https://github.com/Shopify/sarama/pull/1366)).
-
-Improvements:
-- Improve sasl_scram_client example
-  ([1406](https://github.com/Shopify/sarama/pull/1406)).
-- Fix shutdown and race-condition in consumer-group example
-  ([1404](https://github.com/Shopify/sarama/pull/1404)).
-- Add support for error codes 77—81
-  ([1397](https://github.com/Shopify/sarama/pull/1397)).
-- Pool internal objects allocated per message
-  ([1385](https://github.com/Shopify/sarama/pull/1385)).
-- Reduce packet decoder allocations
-  ([1373](https://github.com/Shopify/sarama/pull/1373)).
-- Support timeout when fetching metadata
-  ([1359](https://github.com/Shopify/sarama/pull/1359)).
-
-Bug Fixes:
-- Fix fetch size integer overflow
-  ([1376](https://github.com/Shopify/sarama/pull/1376)).
-- Handle and log throttled FetchResponses
-  ([1383](https://github.com/Shopify/sarama/pull/1383)).
-- Refactor misspelled word Resouce to Resource
-  ([1368](https://github.com/Shopify/sarama/pull/1368)).
-
-#### Version 1.22.1 (2019-04-29)
-
-Improvements:
-- Use zstd 1.3.8
-  ([1350](https://github.com/Shopify/sarama/pull/1350)).
-- Add support for SaslHandshakeRequest v1
-  ([1354](https://github.com/Shopify/sarama/pull/1354)).
-
-Bug Fixes:
-- Fix V5 MetadataRequest nullable topics array
-  ([1353](https://github.com/Shopify/sarama/pull/1353)).
-- Use a different SCRAM client for each broker connection
-  ([1349](https://github.com/Shopify/sarama/pull/1349)).
-- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
-  ([1344](https://github.com/Shopify/sarama/pull/1344)).
-
-#### Version 1.22.0 (2019-04-09)
-
-New Features:
-- Add Offline Replicas Operation to Client
-  ([1318](https://github.com/Shopify/sarama/pull/1318)).
-- Allow using proxy when connecting to broker
-  ([1326](https://github.com/Shopify/sarama/pull/1326)).
-- Implement ReadCommitted
-  ([1307](https://github.com/Shopify/sarama/pull/1307)).
-- Add support for Kafka 2.2.0
-  ([1331](https://github.com/Shopify/sarama/pull/1331)).
-- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes
-  ([1331](https://github.com/Shopify/sarama/pull/1295)).
-
-Improvements:
-- Unregister all broker metrics on broker stop
-  ([1232](https://github.com/Shopify/sarama/pull/1232)).
-- Add SCRAM authentication example
-  ([1303](https://github.com/Shopify/sarama/pull/1303)).
-- Add consumergroup examples
-  ([1304](https://github.com/Shopify/sarama/pull/1304)).
-- Expose consumer batch size metric
-  ([1296](https://github.com/Shopify/sarama/pull/1296)).
-- Add TLS options to console producer and consumer
-  ([1300](https://github.com/Shopify/sarama/pull/1300)).
-- Reduce client close bookkeeping
-  ([1297](https://github.com/Shopify/sarama/pull/1297)).
-- Satisfy error interface in create responses
-  ([1154](https://github.com/Shopify/sarama/pull/1154)).
-- Please lint gods
-  ([1346](https://github.com/Shopify/sarama/pull/1346)).
-
-Bug Fixes:
-- Fix multi consumer group instance crash
-  ([1338](https://github.com/Shopify/sarama/pull/1338)).
-- Update lz4 to latest version
-  ([1347](https://github.com/Shopify/sarama/pull/1347)).
-- Retry ErrNotCoordinatorForConsumer in new consumergroup session
-  ([1231](https://github.com/Shopify/sarama/pull/1231)).
-- Fix cleanup error handler
-  ([1332](https://github.com/Shopify/sarama/pull/1332)).
-- Fix rate condition in PartitionConsumer
-  ([1156](https://github.com/Shopify/sarama/pull/1156)).
-
-#### Version 1.21.0 (2019-02-24)
-
-New Features:
-- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
-  ([1236](https://github.com/Shopify/sarama/pull/1236)).
-- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
-  ([1178](https://github.com/Shopify/sarama/pull/1178)).
-- Implement SASL/OAUTHBEARER
-  ([1240](https://github.com/Shopify/sarama/pull/1240)).
-
-Improvements:
-- Add Go mod support
-  ([1282](https://github.com/Shopify/sarama/pull/1282)).
-- Add error codes 73—76
-  ([1239](https://github.com/Shopify/sarama/pull/1239)).
-- Add retry backoff function
-  ([1160](https://github.com/Shopify/sarama/pull/1160)).
-- Maintain metadata in the producer even when retries are disabled
-  ([1189](https://github.com/Shopify/sarama/pull/1189)).
-- Include ReplicaAssignment in ListTopics
-  ([1274](https://github.com/Shopify/sarama/pull/1274)).
-- Add producer performance tool
-  ([1222](https://github.com/Shopify/sarama/pull/1222)).
-- Add support LogAppend timestamps
-  ([1258](https://github.com/Shopify/sarama/pull/1258)).
-
-Bug Fixes:
-- Fix potential deadlock when a heartbeat request fails
-  ([1286](https://github.com/Shopify/sarama/pull/1286)).
-- Fix consuming compacted topic
-  ([1227](https://github.com/Shopify/sarama/pull/1227)).
-- Set correct Kafka version for DescribeConfigsRequest v1
-  ([1277](https://github.com/Shopify/sarama/pull/1277)).
-- Update kafka test version
-  ([1273](https://github.com/Shopify/sarama/pull/1273)).
-
-#### Version 1.20.1 (2019-01-10)
-
-New Features:
-- Add optional replica id in offset request
-  ([1100](https://github.com/Shopify/sarama/pull/1100)).
-
-Improvements:
-- Implement DescribeConfigs Request + Response v1 & v2
-  ([1230](https://github.com/Shopify/sarama/pull/1230)).
-- Reuse compression objects
-  ([1185](https://github.com/Shopify/sarama/pull/1185)).
-- Switch from png to svg for GoDoc link in README
-  ([1243](https://github.com/Shopify/sarama/pull/1243)).
-- Fix typo in deprecation notice for FetchResponseBlock.Records
-  ([1242](https://github.com/Shopify/sarama/pull/1242)).
-- Fix typos in consumer metadata response file
-  ([1244](https://github.com/Shopify/sarama/pull/1244)).
-
-Bug Fixes:
-- Revert to individual msg retries for non-idempotent
-  ([1203](https://github.com/Shopify/sarama/pull/1203)).
-- Respect MaxMessageBytes limit for uncompressed messages
-  ([1141](https://github.com/Shopify/sarama/pull/1141)).
-
-#### Version 1.20.0 (2018-12-10)
-
-New Features:
- - Add support for zstd compression
-   ([#1170](https://github.com/Shopify/sarama/pull/1170)).
- - Add support for Idempotent Producer
-   ([#1152](https://github.com/Shopify/sarama/pull/1152)).
- - Add support support for Kafka 2.1.0
-   ([#1229](https://github.com/Shopify/sarama/pull/1229)).
- - Add support support for OffsetCommit request/response pairs versions v1 to v5
-   ([#1201](https://github.com/Shopify/sarama/pull/1201)).
- - Add support support for OffsetFetch request/response pair up to version v5
-   ([#1198](https://github.com/Shopify/sarama/pull/1198)).
-
-Improvements:
- - Export broker's Rack setting
-   ([#1173](https://github.com/Shopify/sarama/pull/1173)).
- - Always use latest patch version of Go on CI
-   ([#1202](https://github.com/Shopify/sarama/pull/1202)).
- - Add error codes 61 to 72
-   ([#1195](https://github.com/Shopify/sarama/pull/1195)).
-
-Bug Fixes:
- - Fix build without cgo
-   ([#1182](https://github.com/Shopify/sarama/pull/1182)).
- - Fix go vet suggestion in consumer group file
-   ([#1209](https://github.com/Shopify/sarama/pull/1209)).
- - Fix typos in code and comments
-   ([#1228](https://github.com/Shopify/sarama/pull/1228)).
-
-#### Version 1.19.0 (2018-09-27)
-
-New Features:
- - Implement a higher-level consumer group
-   ([#1099](https://github.com/Shopify/sarama/pull/1099)).
-
-Improvements:
- - Add support for Go 1.11
-   ([#1176](https://github.com/Shopify/sarama/pull/1176)).
-
-Bug Fixes:
- - Fix encoding of `MetadataResponse` with version 2 and higher
-   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
- - Fix race condition in mock async producer
-   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
-
-#### Version 1.18.0 (2018-09-07)
-
-New Features:
- - Make `Partitioner.RequiresConsistency` vary per-message
-   ([#1112](https://github.com/Shopify/sarama/pull/1112)).
- - Add customizable partitioner
-   ([#1118](https://github.com/Shopify/sarama/pull/1118)).
- - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
-   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
-   ([#1055](https://github.com/Shopify/sarama/pull/1055)).
-
-Improvements:
- - Add support for Kafka 2.0.0
-   ([#1149](https://github.com/Shopify/sarama/pull/1149)).
- - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
-   ([#1123](https://github.com/Shopify/sarama/pull/1123)).
- - Simpler offset management
-   ([#1127](https://github.com/Shopify/sarama/pull/1127)).
-
-Bug Fixes:
- - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
-   ([#1110](https://github.com/Shopify/sarama/pull/1110)).
- - Fix consumer block when response did not contain all the
-   expected topic/partition blocks
-   ([#1086](https://github.com/Shopify/sarama/pull/1086)).
- - Fix consumer block when response contains only constrol messages
-   ([#1115](https://github.com/Shopify/sarama/pull/1115)).
- - Add timeout config for ClusterAdmin requests
-   ([#1142](https://github.com/Shopify/sarama/pull/1142)).
- - Add version check when producing message with headers
-   ([#1117](https://github.com/Shopify/sarama/pull/1117)).
- - Fix `MetadataRequest` for empty list of topics
-   ([#1132](https://github.com/Shopify/sarama/pull/1132)).
- - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
-   ([#1125](https://github.com/Shopify/sarama/pull/1125)).
-
-#### Version 1.17.0 (2018-05-30)
-
-New Features:
- - Add support for gzip compression levels
-   ([#1044](https://github.com/Shopify/sarama/pull/1044)).
- - Add support for Metadata request/response pairs versions v1 to v5
-   ([#1047](https://github.com/Shopify/sarama/pull/1047),
-    [#1069](https://github.com/Shopify/sarama/pull/1069)).
- - Add versioning to JoinGroup request/response pairs
-   ([#1098](https://github.com/Shopify/sarama/pull/1098))
- - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
-   ([#1065](https://github.com/Shopify/sarama/pull/1065),
-    [#1096](https://github.com/Shopify/sarama/pull/1096),
-    [#1027](https://github.com/Shopify/sarama/pull/1027)).
- - Add `Controller()` method to Client interface
-   ([#1063](https://github.com/Shopify/sarama/pull/1063)).
-
-Improvements:
- - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
-   ([#1010](https://github.com/Shopify/sarama/pull/1010)).
- - Expose missing protocol parts: `msgSet` and `recordBatch`
-   ([#1049](https://github.com/Shopify/sarama/pull/1049)).
- - Add support for v1 DeleteTopics Request
-   ([#1052](https://github.com/Shopify/sarama/pull/1052)).
- - Add support for Go 1.10
-   ([#1064](https://github.com/Shopify/sarama/pull/1064)).
- - Claim support for Kafka 1.1.0
-   ([#1073](https://github.com/Shopify/sarama/pull/1073)).
-
-Bug Fixes:
- - Fix FindCoordinatorResponse.encode to allow nil Coordinator
-   ([#1050](https://github.com/Shopify/sarama/pull/1050),
-    [#1051](https://github.com/Shopify/sarama/pull/1051)).
- - Clear all metadata when we have the latest topic info
-   ([#1033](https://github.com/Shopify/sarama/pull/1033)).
- - Make `PartitionConsumer.Close` idempotent
-   ([#1092](https://github.com/Shopify/sarama/pull/1092)).
-
-#### Version 1.16.0 (2018-02-12)
-
-New Features:
- - Add support for the Create/Delete Topics request/response pairs
-   ([#1007](https://github.com/Shopify/sarama/pull/1007),
-    [#1008](https://github.com/Shopify/sarama/pull/1008)).
- - Add support for the Describe/Create/Delete ACL request/response pairs
-   ([#1009](https://github.com/Shopify/sarama/pull/1009)).
- - Add support for the five transaction-related request/response pairs
-   ([#1016](https://github.com/Shopify/sarama/pull/1016)).
-
-Improvements:
- - Permit setting version on mock producer responses
-   ([#999](https://github.com/Shopify/sarama/pull/999)).
- - Add `NewMockBrokerListener` helper for testing TLS connections
-   ([#1019](https://github.com/Shopify/sarama/pull/1019)).
- - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
-   which results in much higher throughput in most cases
-   ([#1024](https://github.com/Shopify/sarama/pull/1024)).
- - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
-   reduce CPU and memory usage when processing many partitions
-   ([#1028](https://github.com/Shopify/sarama/pull/1028)).
- - Assign relative offsets to messages in the producer to save the brokers a
-   recompression pass
-   ([#1002](https://github.com/Shopify/sarama/pull/1002),
-    [#1015](https://github.com/Shopify/sarama/pull/1015)).
-
-Bug Fixes:
- - Fix producing uncompressed batches with the new protocol format
-   ([#1032](https://github.com/Shopify/sarama/issues/1032)).
- - Fix consuming compacted topics with the new protocol format
-   ([#1005](https://github.com/Shopify/sarama/issues/1005)).
- - Fix consuming topics with a mix of protocol formats
-   ([#1021](https://github.com/Shopify/sarama/issues/1021)).
- - Fix consuming when the broker includes multiple batches in a single response
-   ([#1022](https://github.com/Shopify/sarama/issues/1022)).
- - Fix detection of `PartialTrailingMessage` when the partial message was
-   truncated before the magic value indicating its version
-   ([#1030](https://github.com/Shopify/sarama/pull/1030)).
- - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
-   ([#1035](https://github.com/Shopify/sarama/pull/1035)).
-
-#### Version 1.15.0 (2017-12-08)
-
-New Features:
- - Claim official support for Kafka 1.0, though it did already work
-   ([#984](https://github.com/Shopify/sarama/pull/984)).
- - Helper methods for Kafka version numbers to/from strings
-   ([#989](https://github.com/Shopify/sarama/pull/989)).
- - Implement CreatePartitions request/response
-   ([#985](https://github.com/Shopify/sarama/pull/985)).
-
-Improvements:
- - Add error codes 45-60
-   ([#986](https://github.com/Shopify/sarama/issues/986)).
-
-Bug Fixes:
- - Fix slow consuming for certain Kafka 0.11/1.0 configurations
-   ([#982](https://github.com/Shopify/sarama/pull/982)).
- - Correctly determine when a FetchResponse contains the new message format
-   ([#990](https://github.com/Shopify/sarama/pull/990)).
- - Fix producing with multiple headers
-   ([#996](https://github.com/Shopify/sarama/pull/996)).
- - Fix handling of truncated record batches
-   ([#998](https://github.com/Shopify/sarama/pull/998)).
- - Fix leaking metrics when closing brokers
-   ([#991](https://github.com/Shopify/sarama/pull/991)).
-
-#### Version 1.14.0 (2017-11-13)
-
-New Features:
- - Add support for the new Kafka 0.11 record-batch format, including the wire
-   protocol and the necessary behavioural changes in the producer and consumer.
-   Transactions and idempotency are not yet supported, but producing and
-   consuming should work with all the existing bells and whistles (batching,
-   compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
-   of Arista Networks for this work. Part of
-   ([#901](https://github.com/Shopify/sarama/issues/901)).
-
-Bug Fixes:
- - Fix encoding of ProduceResponse versions in test
-   ([#970](https://github.com/Shopify/sarama/pull/970)).
- - Return partial replicas list when we have it
-   ([#975](https://github.com/Shopify/sarama/pull/975)).
-
-#### Version 1.13.0 (2017-10-04)
-
-New Features:
- - Support for FetchRequest version 3
-   ([#905](https://github.com/Shopify/sarama/pull/905)).
- - Permit setting version on mock FetchResponses
-   ([#939](https://github.com/Shopify/sarama/pull/939)).
- - Add a configuration option to support storing only minimal metadata for
-   extremely large clusters
-   ([#937](https://github.com/Shopify/sarama/pull/937)).
- - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
-   ([#932](https://github.com/Shopify/sarama/pull/932)).
-
-Improvements:
- - Provide the block-level timestamp when consuming compressed messages
-   ([#885](https://github.com/Shopify/sarama/issues/885)).
- - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
-   by the broker, which can be meaningful
-   ([#930](https://github.com/Shopify/sarama/pull/930)).
- - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
-   variance in the actual timeout
-   ([#933](https://github.com/Shopify/sarama/pull/933)).
-
-Bug Fixes:
- - Gracefully handle messages with negative timestamps
-   ([#907](https://github.com/Shopify/sarama/pull/907)).
- - Raise a proper error when encountering an unknown message version
-   ([#940](https://github.com/Shopify/sarama/pull/940)).
-
-#### Version 1.12.0 (2017-05-08)
-
-New Features:
- - Added support for the `ApiVersions` request and response pair, and Kafka
-   version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
-   that you still need to specify the Kafka version in the Sarama configuration
-   for the time being.
- - Added a `Brokers` method to the Client which returns the complete set of
-   active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- - Added an `InSyncReplicas` method to the Client which returns the set of all
-   in-sync broker IDs for the given partition, now that the Kafka versions for
-   which this was misleading are no longer in our supported set
-   ([#872](https://github.com/Shopify/sarama/pull/872)).
- - Added a `NewCustomHashPartitioner` method which allows constructing a hash
-   partitioner with a custom hash method in case the default (FNV-1a) is not
-   suitable
-   ([#837](https://github.com/Shopify/sarama/pull/837),
-    [#841](https://github.com/Shopify/sarama/pull/841)).
-
-Improvements:
- - Recognize more Kafka error codes
-   ([#859](https://github.com/Shopify/sarama/pull/859)).
-
-Bug Fixes:
- - Fix an issue where decoding a malformed FetchRequest would not return the
-   correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- - Respect ordering of group protocols in JoinGroupRequests. This fix is
-   transparent if you're using the `AddGroupProtocol` or
-   `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
-   the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
-   ([#812](https://github.com/Shopify/sarama/issues/812)).
- - Fix an alignment-related issue with atomics on 32-bit architectures
-   ([#859](https://github.com/Shopify/sarama/pull/859)).
-
-#### Version 1.11.0 (2016-12-20)
-
-_Important:_ As of Sarama 1.11 it is necessary to set the config value of
-`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
-versions would silently override this value when instantiating a SyncProducer
-which led to unexpected values and data races.
-
-New Features:
- - Metrics! Thanks to Sébastien Launay for all his work on this feature
-   ([#701](https://github.com/Shopify/sarama/pull/701),
-    [#746](https://github.com/Shopify/sarama/pull/746),
-    [#766](https://github.com/Shopify/sarama/pull/766)).
- - Add support for LZ4 compression
-   ([#786](https://github.com/Shopify/sarama/pull/786)).
- - Add support for ListOffsetRequest v1 and Kafka 0.10.1
-   ([#775](https://github.com/Shopify/sarama/pull/775)).
- - Added a `HighWaterMarks` method to the Consumer which aggregates the
-   `HighWaterMarkOffset` values of its child topic/partitions
-   ([#769](https://github.com/Shopify/sarama/pull/769)).
-
-Bug Fixes:
- - Fixed producing when using timestamps, compression and Kafka 0.10
-   ([#759](https://github.com/Shopify/sarama/pull/759)).
- - Added missing decoder methods to DescribeGroups response
-   ([#756](https://github.com/Shopify/sarama/pull/756)).
- - Fix producer shutdown when `Return.Errors` is disabled
-   ([#787](https://github.com/Shopify/sarama/pull/787)).
- - Don't mutate configuration in SyncProducer
-   ([#790](https://github.com/Shopify/sarama/pull/790)).
- - Fix crash on SASL initialization failure
-   ([#795](https://github.com/Shopify/sarama/pull/795)).
-
-#### Version 1.10.1 (2016-08-30)
-
-Bug Fixes:
- - Fix the documentation for `HashPartitioner` which was incorrect
-   ([#717](https://github.com/Shopify/sarama/pull/717)).
- - Permit client creation even when it is limited by ACLs
-   ([#722](https://github.com/Shopify/sarama/pull/722)).
- - Several fixes to the consumer timer optimization code, regressions introduced
-   in v1.10.0. Go's timers are finicky
-   ([#730](https://github.com/Shopify/sarama/pull/730),
-    [#733](https://github.com/Shopify/sarama/pull/733),
-    [#734](https://github.com/Shopify/sarama/pull/734)).
- - Handle consuming compressed relative offsets with Kafka 0.10
-   ([#735](https://github.com/Shopify/sarama/pull/735)).
-
-#### Version 1.10.0 (2016-08-02)
-
-_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
-Kafka you are running against (via the `config.Version` value) in order to use
-features that may not be compatible with old Kafka versions. If you don't
-specify this value it will default to 0.8.2 (the minimum supported), and trying
-to use more recent features (like the offset manager) will fail with an error.
-
-_Also:_ The offset-manager's behaviour has been changed to match the upstream
-java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
-[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
-offset-manager, please ensure that you are committing one *greater* than the
-last consumed message offset or else you may end up consuming duplicate
-messages.
-
-New Features:
- - Support for Kafka 0.10
-   ([#672](https://github.com/Shopify/sarama/pull/672),
-    [#678](https://github.com/Shopify/sarama/pull/678),
-    [#681](https://github.com/Shopify/sarama/pull/681), and others).
- - Support for configuring the target Kafka version
-   ([#676](https://github.com/Shopify/sarama/pull/676)).
- - Batch producing support in the SyncProducer
-   ([#677](https://github.com/Shopify/sarama/pull/677)).
- - Extend producer mock to allow setting expectations on message contents
-   ([#667](https://github.com/Shopify/sarama/pull/667)).
-
-Improvements:
- - Support `nil` compressed messages for deleting in compacted topics
-   ([#634](https://github.com/Shopify/sarama/pull/634)).
- - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
-   misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- - Re-use consumer expiry timers, removing one allocation per consumed message
-   ([#707](https://github.com/Shopify/sarama/pull/707)).
-
-Bug Fixes:
- - Actually default the client ID to "sarama" like we say we do
-   ([#664](https://github.com/Shopify/sarama/pull/664)).
- - Fix a rare issue where `Client.Leader` could return the wrong error
-   ([#685](https://github.com/Shopify/sarama/pull/685)).
- - Fix a possible tight loop in the consumer
-   ([#693](https://github.com/Shopify/sarama/pull/693)).
- - Match upstream's offset-tracking behaviour
-   ([#705](https://github.com/Shopify/sarama/pull/705)).
- - Report UnknownTopicOrPartition errors from the offset manager
-   ([#706](https://github.com/Shopify/sarama/pull/706)).
- - Fix possible negative partition value from the HashPartitioner
-   ([#709](https://github.com/Shopify/sarama/pull/709)).
-
-#### Version 1.9.0 (2016-05-16)
-
-New Features:
- - Add support for custom offset manager retention durations
-   ([#602](https://github.com/Shopify/sarama/pull/602)).
- - Publish low-level mocks to enable testing of third-party producer/consumer
-   implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- - Declare support for Golang 1.6
-   ([#611](https://github.com/Shopify/sarama/pull/611)).
- - Support for SASL plain-text auth
-   ([#648](https://github.com/Shopify/sarama/pull/648)).
-
-Improvements:
- - Simplified broker locking scheme slightly
-   ([#604](https://github.com/Shopify/sarama/pull/604)).
- - Documentation cleanup
-   ([#605](https://github.com/Shopify/sarama/pull/605),
-    [#621](https://github.com/Shopify/sarama/pull/621),
-    [#654](https://github.com/Shopify/sarama/pull/654)).
-
-Bug Fixes:
- - Fix race condition shutting down the OffsetManager
-   ([#658](https://github.com/Shopify/sarama/pull/658)).
-
-#### Version 1.8.0 (2016-02-01)
-
-New Features:
- - Full support for Kafka 0.9:
-   - All protocol messages and fields
-   ([#586](https://github.com/Shopify/sarama/pull/586),
-   [#588](https://github.com/Shopify/sarama/pull/588),
-   [#590](https://github.com/Shopify/sarama/pull/590)).
-   - Verified that TLS support works
-   ([#581](https://github.com/Shopify/sarama/pull/581)).
-   - Fixed the OffsetManager compatibility
-   ([#585](https://github.com/Shopify/sarama/pull/585)).
-
-Improvements:
- - Optimize for fewer system calls when reading from the network
-   ([#584](https://github.com/Shopify/sarama/pull/584)).
- - Automatically retry `InvalidMessage` errors to match upstream behaviour
-   ([#589](https://github.com/Shopify/sarama/pull/589)).
-
-#### Version 1.7.0 (2015-12-11)
-
-New Features:
- - Preliminary support for Kafka 0.9
-   ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
-   caveats:
-   - Protocol-layer support is mostly in place
-     ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
-     renamed some messages and fields, which we did not rename in order to
-     preserve API compatibility.
-   - The producer and consumer work against 0.9, but the offset manager does
-     not ([#573](https://github.com/Shopify/sarama/pull/573)).
-   - TLS support may or may not work
-     ([#581](https://github.com/Shopify/sarama/pull/581)).
-
-Improvements:
- - Don't wait for request timeouts on dead brokers, greatly speeding recovery
-   when the TCP connection is left hanging
-   ([#548](https://github.com/Shopify/sarama/pull/548)).
- - Refactored part of the producer. The new version provides a much more elegant
-   solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
-   slightly more efficient, and much more precise in calculating batch sizes
-   when compression is used
-   ([#549](https://github.com/Shopify/sarama/pull/549),
-   [#550](https://github.com/Shopify/sarama/pull/550),
-   [#551](https://github.com/Shopify/sarama/pull/551)).
-
-Bug Fixes:
- - Fix race condition in consumer test mock
-   ([#553](https://github.com/Shopify/sarama/pull/553)).
-
-#### Version 1.6.1 (2015-09-25)
-
-Bug Fixes:
- - Fix panic that could occur if a user-supplied message value failed to encode
-   ([#449](https://github.com/Shopify/sarama/pull/449)).
-
-#### Version 1.6.0 (2015-09-04)
-
-New Features:
- - Implementation of a consumer offset manager using the APIs introduced in
-   Kafka 0.8.2. The API is designed mainly for integration into a future
-   high-level consumer, not for direct use, although it is *possible* to use it
-   directly.
-   ([#461](https://github.com/Shopify/sarama/pull/461)).
-
-Improvements:
- - CRC32 calculation is much faster on machines with SSE4.2 instructions,
-   removing a major hotspot from most profiles
-   ([#255](https://github.com/Shopify/sarama/pull/255)).
-
-Bug Fixes:
- - Make protocol decoding more robust against some malformed packets generated
-   by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
-   [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
-   ([#528](https://github.com/Shopify/sarama/pull/528)).
- - Fix a potential race condition panic in the consumer on shutdown
-   ([#529](https://github.com/Shopify/sarama/pull/529)).
-
-#### Version 1.5.0 (2015-08-17)
-
-New Features:
- - TLS-encrypted network connections are now supported. This feature is subject
-   to change when Kafka releases built-in TLS support, but for now this is
-   enough to work with TLS-terminating proxies
-   ([#154](https://github.com/Shopify/sarama/pull/154)).
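Since this is just a configuration knob, a hedged sketch of enabling it (field names as on the current `Config` struct; broker address is a placeholder):

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Enable TLS for all broker connections; the tls.Config is the standard
	// library's, so CAs and client certificates are set up the usual Go way.
	config := sarama.NewConfig()
	config.Net.TLS.Enable = true
	config.Net.TLS.Config = &tls.Config{MinVersion: tls.VersionTLS12}

	client, err := sarama.NewClient([]string{"broker.example.com:9093"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```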
-
-Improvements:
- - The consumer will not block if a single partition is not drained by the user;
-   all other partitions will continue to consume normally
-   ([#485](https://github.com/Shopify/sarama/pull/485)).
- - Formatting of error strings has been much improved
-   ([#495](https://github.com/Shopify/sarama/pull/495)).
- - Internal refactoring of the producer for code cleanliness and to enable
-   future work ([#300](https://github.com/Shopify/sarama/pull/300)).
-
-Bug Fixes:
- - Fix a potential deadlock in the consumer on shutdown
-   ([#475](https://github.com/Shopify/sarama/pull/475)).
-
-#### Version 1.4.3 (2015-07-21)
-
-Bug Fixes:
- - Don't include the partitioner in the producer's "fetch partitions"
-   circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- - Don't retry messages until the broker is closed when abandoning a broker in
-   the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- - Update the import path for snappy-go, it has moved again and the API has
-   changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
-
-#### Version 1.4.2 (2015-05-27)
-
-Bug Fixes:
- - Update the import path for snappy-go, it has moved from google code to github
-   ([#456](https://github.com/Shopify/sarama/pull/456)).
-
-#### Version 1.4.1 (2015-05-25)
-
-Improvements:
- - Optimizations when decoding snappy messages, thanks to John Potocny
-   ([#446](https://github.com/Shopify/sarama/pull/446)).
-
-Bug Fixes:
- - Fix hypothetical race conditions on producer shutdown
-   ([#450](https://github.com/Shopify/sarama/pull/450),
-   [#451](https://github.com/Shopify/sarama/pull/451)).
-
-#### Version 1.4.0 (2015-05-01)
-
-New Features:
- - The consumer now implements `Topics()` and `Partitions()` methods to enable
-   users to dynamically choose what topics/partitions to consume without
-   instantiating a full client
-   ([#431](https://github.com/Shopify/sarama/pull/431)).
- - The partition-consumer now exposes the high water mark offset value returned
-   by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
- - Added a `kafka-console-consumer` tool capable of handling multiple
-   partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
-   ([#439](https://github.com/Shopify/sarama/pull/439),
-   [#442](https://github.com/Shopify/sarama/pull/442)).
-
-Improvements:
- - The producer's logging during retry scenarios is more consistent, more
-   useful, and slightly less verbose
-   ([#429](https://github.com/Shopify/sarama/pull/429)).
- - The client now shuffles its initial list of seed brokers in order to prevent
-   thundering herd on the first broker in the list
-   ([#441](https://github.com/Shopify/sarama/pull/441)).
-
-Bug Fixes:
- - The producer now correctly manages its state if retries occur when it is
-   shutting down, fixing several instances of confusing behaviour and at least
-   one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- - The consumer now handles messages for different partitions asynchronously,
-   making it much more resilient to specific user code ordering
-   ([#325](https://github.com/Shopify/sarama/pull/325)).
-
-#### Version 1.3.0 (2015-04-16)
-
-New Features:
- - The client now tracks consumer group coordinators using
-   ConsumerMetadataRequests similar to how it tracks partition leadership using
-   regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
-   This adds two methods to the client API:
-   - `Coordinator(consumerGroup string) (*Broker, error)`
-   - `RefreshCoordinator(consumerGroup string) error`
-
-Improvements:
- - ConsumerMetadataResponses now automatically create a Broker object out of the
-   ID/address/port combination for the Coordinator; accessing the fields
-   individually has been deprecated
-   ([#413](https://github.com/Shopify/sarama/pull/413)).
- - Much improved handling of `OffsetOutOfRange` errors in the consumer.
-   Consumers will fail to start if the provided offset is out of range
-   ([#418](https://github.com/Shopify/sarama/pull/418))
-   and they will automatically shut down if the offset falls out of range
-   ([#424](https://github.com/Shopify/sarama/pull/424)).
- - Small performance improvement in encoding and decoding protocol messages
-   ([#427](https://github.com/Shopify/sarama/pull/427)).
-
-Bug Fixes:
- - Fix a rare race condition in the client's background metadata refresher if
-   it happens to be activated while the client is being closed
-   ([#422](https://github.com/Shopify/sarama/pull/422)).
-
-#### Version 1.2.0 (2015-04-07)
-
-Improvements:
- - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
-   ([#389](https://github.com/Shopify/sarama/pull/389)).
- - The producer is now somewhat more memory-efficient during and after retrying
-   messages due to an improved queue implementation
-   ([#396](https://github.com/Shopify/sarama/pull/396)).
- - The consumer produces much more useful logging output when leadership
-   changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- - The client's `GetOffset` method will now automatically refresh metadata and
-   retry once in the event of stale information or similar
-   ([#394](https://github.com/Shopify/sarama/pull/394)).
- - Broker connections now have support for using TCP keepalives
-   ([#407](https://github.com/Shopify/sarama/issues/407)).
-
-Bug Fixes:
- - The OffsetCommitRequest message now correctly implements all three possible
-   API versions ([#390](https://github.com/Shopify/sarama/pull/390),
-   [#400](https://github.com/Shopify/sarama/pull/400)).
-
-#### Version 1.1.0 (2015-03-20)
-
-Improvements:
- - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
-   broken topics don't choke throughput
-   ([#373](https://github.com/Shopify/sarama/pull/373)).
-
-Bug Fixes:
- - Fix the producer's internal reference counting in certain unusual scenarios
-   ([#367](https://github.com/Shopify/sarama/pull/367)).
- - Fix the consumer's internal reference counting in certain unusual scenarios
-   ([#369](https://github.com/Shopify/sarama/pull/369)).
- - Fix a condition where the producer's internal control messages could have
-   gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- - Fix an issue where invalid partition lists would be cached when asking for
-   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
-
-
-#### Version 1.0.0 (2015-03-17)
-
-Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
-
-- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
-- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
-- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
-- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
-- All the configuration values have been unified in the `Config` struct.
-- Much improved test suite.
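As a hedged illustration of that 1.0 surface (constructor and field names as in later Sarama releases; broker address and topic are placeholders), producing a message with the blocking `SyncProducer` looks roughly like this:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// All tunables live on the Config struct; the Client is opened for you.
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // the SyncProducer needs success reports

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	// SendMessage blocks until the broker has acknowledged the message.
	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}
```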
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
deleted file mode 100644
index d2bf435..0000000
--- a/vendor/github.com/Shopify/sarama/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2013 Shopify
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
deleted file mode 100644
index 4714d77..0000000
--- a/vendor/github.com/Shopify/sarama/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-default: fmt get update test lint
-
-GO       := go
-GOBUILD  := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
-GOTEST   := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic
-
-FILES    := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
-TESTS    := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
-
-get:
-	$(GO) get ./...
-	$(GO) mod verify
-	$(GO) mod tidy
-
-update:
-	$(GO) get -u -v ./...
-	$(GO) mod verify
-	$(GO) mod tidy
-
-fmt:
-	gofmt -s -l -w $(FILES) $(TESTS)
-
-lint:
-	GOFLAGS="-tags=functional" golangci-lint run
-
-test:
-	$(GOTEST) ./...
-
-.PHONY: test_functional
-test_functional:
-	$(GOTEST) -tags=functional ./...
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
deleted file mode 100644
index f2beb73..0000000
--- a/vendor/github.com/Shopify/sarama/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# sarama
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama)
-[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
-[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
-
-Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
-
-## Getting started
-
-- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama).
-- Mocks for testing are available in the [mocks](./mocks) subpackage.
-- The [examples](./examples) directory contains more elaborate example applications.
-- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
-
-You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
-
-## Compatibility and API stability
-
-Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
-the two latest stable releases of Kafka and Go, and we provide a two month
-grace period for older releases. This means we currently officially support
-Go 1.15 through 1.16, and Kafka 2.6 through 2.8, although older releases are
-still likely to work.
-
-Sarama follows semantic versioning and provides API stability via the gopkg.in service.
-You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
-A changelog is available [here](CHANGELOG.md).
-
-## Contributing
-
-- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
-- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
-- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
-- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
-- If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
deleted file mode 100644
index 449102f..0000000
--- a/vendor/github.com/Shopify/sarama/acl_create_request.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package sarama
-
-// CreateAclsRequest is an acl creation request
-type CreateAclsRequest struct {
-	Version      int16
-	AclCreations []*AclCreation
-}
-
-func (c *CreateAclsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
-		return err
-	}
-
-	for _, aclCreation := range c.AclCreations {
-		if err := aclCreation.encode(pe, c.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
-	c.Version = version
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.AclCreations = make([]*AclCreation, n)
-
-	for i := 0; i < n; i++ {
-		c.AclCreations[i] = new(AclCreation)
-		if err := c.AclCreations[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateAclsRequest) key() int16 {
-	return 30
-}
-
-func (c *CreateAclsRequest) version() int16 {
-	return c.Version
-}
-
-func (c *CreateAclsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
-	switch c.Version {
-	case 1:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
-
-// AclCreation is a wrapper around Resource and Acl type
-type AclCreation struct {
-	Resource
-	Acl
-}
-
-func (a *AclCreation) encode(pe packetEncoder, version int16) error {
-	if err := a.Resource.encode(pe, version); err != nil {
-		return err
-	}
-	if err := a.Acl.encode(pe); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
-	if err := a.Resource.decode(pd, version); err != nil {
-		return err
-	}
-	if err := a.Acl.decode(pd, version); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
deleted file mode 100644
index 21d6c34..0000000
--- a/vendor/github.com/Shopify/sarama/acl_create_response.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import "time"
-
-// CreateAclsResponse is an acl creation response type
-type CreateAclsResponse struct {
-	ThrottleTime         time.Duration
-	AclCreationResponses []*AclCreationResponse
-}
-
-func (c *CreateAclsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
-		return err
-	}
-
-	for _, aclCreationResponse := range c.AclCreationResponses {
-		if err := aclCreationResponse.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.AclCreationResponses = make([]*AclCreationResponse, n)
-	for i := 0; i < n; i++ {
-		c.AclCreationResponses[i] = new(AclCreationResponse)
-		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateAclsResponse) key() int16 {
-	return 30
-}
-
-func (c *CreateAclsResponse) version() int16 {
-	return 0
-}
-
-func (c *CreateAclsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-// AclCreationResponse is an acl creation response type
-type AclCreationResponse struct {
-	Err    KError
-	ErrMsg *string
-}
-
-func (a *AclCreationResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(a.Err))
-
-	if err := pe.putNullableString(a.ErrMsg); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	a.Err = KError(kerr)
-
-	if a.ErrMsg, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
deleted file mode 100644
index 5e5c03b..0000000
--- a/vendor/github.com/Shopify/sarama/acl_delete_request.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package sarama
-
-// DeleteAclsRequest is a delete acl request
-type DeleteAclsRequest struct {
-	Version int
-	Filters []*AclFilter
-}
-
-func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(d.Filters)); err != nil {
-		return err
-	}
-
-	for _, filter := range d.Filters {
-		filter.Version = d.Version
-		if err := filter.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
-	d.Version = int(version)
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	d.Filters = make([]*AclFilter, n)
-	for i := 0; i < n; i++ {
-		d.Filters[i] = new(AclFilter)
-		d.Filters[i].Version = int(version)
-		if err := d.Filters[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteAclsRequest) key() int16 {
-	return 31
-}
-
-func (d *DeleteAclsRequest) version() int16 {
-	return int16(d.Version)
-}
-
-func (d *DeleteAclsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
deleted file mode 100644
index cd33749..0000000
--- a/vendor/github.com/Shopify/sarama/acl_delete_response.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package sarama
-
-import "time"
-
-// DeleteAclsResponse is a delete acl response
-type DeleteAclsResponse struct {
-	Version         int16
-	ThrottleTime    time.Duration
-	FilterResponses []*FilterResponse
-}
-
-func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
-		return err
-	}
-
-	for _, filterResponse := range d.FilterResponses {
-		if err := filterResponse.encode(pe, d.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	d.FilterResponses = make([]*FilterResponse, n)
-
-	for i := 0; i < n; i++ {
-		d.FilterResponses[i] = new(FilterResponse)
-		if err := d.FilterResponses[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteAclsResponse) key() int16 {
-	return 31
-}
-
-func (d *DeleteAclsResponse) version() int16 {
-	return d.Version
-}
-
-func (d *DeleteAclsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-// FilterResponse is a filter response type
-type FilterResponse struct {
-	Err          KError
-	ErrMsg       *string
-	MatchingAcls []*MatchingAcl
-}
-
-func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
-	pe.putInt16(int16(f.Err))
-	if err := pe.putNullableString(f.ErrMsg); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
-		return err
-	}
-	for _, matchingAcl := range f.MatchingAcls {
-		if err := matchingAcl.encode(pe, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	f.Err = KError(kerr)
-
-	if f.ErrMsg, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	f.MatchingAcls = make([]*MatchingAcl, n)
-	for i := 0; i < n; i++ {
-		f.MatchingAcls[i] = new(MatchingAcl)
-		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// MatchingAcl is a matching acl type
-type MatchingAcl struct {
-	Err    KError
-	ErrMsg *string
-	Resource
-	Acl
-}
-
-func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
-	pe.putInt16(int16(m.Err))
-	if err := pe.putNullableString(m.ErrMsg); err != nil {
-		return err
-	}
-
-	if err := m.Resource.encode(pe, version); err != nil {
-		return err
-	}
-
-	if err := m.Acl.encode(pe); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	m.Err = KError(kerr)
-
-	if m.ErrMsg, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	if err := m.Resource.decode(pd, version); err != nil {
-		return err
-	}
-
-	if err := m.Acl.decode(pd, version); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
deleted file mode 100644
index e0fe902..0000000
--- a/vendor/github.com/Shopify/sarama/acl_describe_request.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package sarama
-
-// DescribeAclsRequest is a describe acl request type
-type DescribeAclsRequest struct {
-	Version int
-	AclFilter
-}
-
-func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
-	d.AclFilter.Version = d.Version
-	return d.AclFilter.encode(pe)
-}
-
-func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
-	d.Version = int(version)
-	d.AclFilter.Version = int(version)
-	return d.AclFilter.decode(pd, version)
-}
-
-func (d *DescribeAclsRequest) key() int16 {
-	return 29
-}
-
-func (d *DescribeAclsRequest) version() int16 {
-	return int16(d.Version)
-}
-
-func (d *DescribeAclsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
deleted file mode 100644
index 3255fd4..0000000
--- a/vendor/github.com/Shopify/sarama/acl_describe_response.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package sarama
-
-import "time"
-
-// DescribeAclsResponse is a describe acl response type
-type DescribeAclsResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	Err          KError
-	ErrMsg       *string
-	ResourceAcls []*ResourceAcls
-}
-
-func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-	pe.putInt16(int16(d.Err))
-
-	if err := pe.putNullableString(d.ErrMsg); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
-		return err
-	}
-
-	for _, resourceAcl := range d.ResourceAcls {
-		if err := resourceAcl.encode(pe, d.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	d.Err = KError(kerr)
-
-	errmsg, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	if errmsg != "" {
-		d.ErrMsg = &errmsg
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	d.ResourceAcls = make([]*ResourceAcls, n)
-
-	for i := 0; i < n; i++ {
-		d.ResourceAcls[i] = new(ResourceAcls)
-		if err := d.ResourceAcls[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DescribeAclsResponse) key() int16 {
-	return 29
-}
-
-func (d *DescribeAclsResponse) version() int16 {
-	return d.Version
-}
-
-func (d *DescribeAclsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
deleted file mode 100644
index c3ba8dd..0000000
--- a/vendor/github.com/Shopify/sarama/acl_types.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"strings"
-)
-
-type (
-	AclOperation int
-
-	AclPermissionType int
-
-	AclResourceType int
-
-	AclResourcePatternType int
-)
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
-const (
-	AclOperationUnknown AclOperation = iota
-	AclOperationAny
-	AclOperationAll
-	AclOperationRead
-	AclOperationWrite
-	AclOperationCreate
-	AclOperationDelete
-	AclOperationAlter
-	AclOperationDescribe
-	AclOperationClusterAction
-	AclOperationDescribeConfigs
-	AclOperationAlterConfigs
-	AclOperationIdempotentWrite
-)
-
-func (a *AclOperation) String() string {
-	mapping := map[AclOperation]string{
-		AclOperationUnknown:         "Unknown",
-		AclOperationAny:             "Any",
-		AclOperationAll:             "All",
-		AclOperationRead:            "Read",
-		AclOperationWrite:           "Write",
-		AclOperationCreate:          "Create",
-		AclOperationDelete:          "Delete",
-		AclOperationAlter:           "Alter",
-		AclOperationDescribe:        "Describe",
-		AclOperationClusterAction:   "ClusterAction",
-		AclOperationDescribeConfigs: "DescribeConfigs",
-		AclOperationAlterConfigs:    "AlterConfigs",
-		AclOperationIdempotentWrite: "IdempotentWrite",
-	}
-	s, ok := mapping[*a]
-	if !ok {
-		s = mapping[AclOperationUnknown]
-	}
-	return s
-}
-
-// MarshalText returns the text form of the AclOperation (name without prefix)
-func (a *AclOperation) MarshalText() ([]byte, error) {
-	return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
-func (a *AclOperation) UnmarshalText(text []byte) error {
-	normalized := strings.ToLower(string(text))
-	mapping := map[string]AclOperation{
-		"unknown":         AclOperationUnknown,
-		"any":             AclOperationAny,
-		"all":             AclOperationAll,
-		"read":            AclOperationRead,
-		"write":           AclOperationWrite,
-		"create":          AclOperationCreate,
-		"delete":          AclOperationDelete,
-		"alter":           AclOperationAlter,
-		"describe":        AclOperationDescribe,
-		"clusteraction":   AclOperationClusterAction,
-		"describeconfigs": AclOperationDescribeConfigs,
-		"alterconfigs":    AclOperationAlterConfigs,
-		"idempotentwrite": AclOperationIdempotentWrite,
-	}
-	ao, ok := mapping[normalized]
-	if !ok {
-		*a = AclOperationUnknown
-		return fmt.Errorf("no acl operation with name %s", normalized)
-	}
-	*a = ao
-	return nil
-}
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
-const (
-	AclPermissionUnknown AclPermissionType = iota
-	AclPermissionAny
-	AclPermissionDeny
-	AclPermissionAllow
-)
-
-func (a *AclPermissionType) String() string {
-	mapping := map[AclPermissionType]string{
-		AclPermissionUnknown: "Unknown",
-		AclPermissionAny:     "Any",
-		AclPermissionDeny:    "Deny",
-		AclPermissionAllow:   "Allow",
-	}
-	s, ok := mapping[*a]
-	if !ok {
-		s = mapping[AclPermissionUnknown]
-	}
-	return s
-}
-
-// MarshalText returns the text form of the AclPermissionType (name without prefix)
-func (a *AclPermissionType) MarshalText() ([]byte, error) {
-	return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text reprentation of the permission type and converts it to an AclPermissionType
-func (a *AclPermissionType) UnmarshalText(text []byte) error {
-	normalized := strings.ToLower(string(text))
-	mapping := map[string]AclPermissionType{
-		"unknown": AclPermissionUnknown,
-		"any":     AclPermissionAny,
-		"deny":    AclPermissionDeny,
-		"allow":   AclPermissionAllow,
-	}
-
-	apt, ok := mapping[normalized]
-	if !ok {
-		*a = AclPermissionUnknown
-		return fmt.Errorf("no acl permission with name %s", normalized)
-	}
-	*a = apt
-	return nil
-}
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
-const (
-	AclResourceUnknown AclResourceType = iota
-	AclResourceAny
-	AclResourceTopic
-	AclResourceGroup
-	AclResourceCluster
-	AclResourceTransactionalID
-	AclResourceDelegationToken
-)
-
-func (a *AclResourceType) String() string {
-	mapping := map[AclResourceType]string{
-		AclResourceUnknown:         "Unknown",
-		AclResourceAny:             "Any",
-		AclResourceTopic:           "Topic",
-		AclResourceGroup:           "Group",
-		AclResourceCluster:         "Cluster",
-		AclResourceTransactionalID: "TransactionalID",
-		AclResourceDelegationToken: "DelegationToken",
-	}
-	s, ok := mapping[*a]
-	if !ok {
-		s = mapping[AclResourceUnknown]
-	}
-	return s
-}
-
-// MarshalText returns the text form of the AclResourceType (name without prefix)
-func (a *AclResourceType) MarshalText() ([]byte, error) {
-	return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text reprentation of the resource type and converts it to an AclResourceType
-func (a *AclResourceType) UnmarshalText(text []byte) error {
-	normalized := strings.ToLower(string(text))
-	mapping := map[string]AclResourceType{
-		"unknown":         AclResourceUnknown,
-		"any":             AclResourceAny,
-		"topic":           AclResourceTopic,
-		"group":           AclResourceGroup,
-		"cluster":         AclResourceCluster,
-		"transactionalid": AclResourceTransactionalID,
-		"delegationtoken": AclResourceDelegationToken,
-	}
-
-	art, ok := mapping[normalized]
-	if !ok {
-		*a = AclResourceUnknown
-		return fmt.Errorf("no acl resource with name %s", normalized)
-	}
-	*a = art
-	return nil
-}
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
-const (
-	AclPatternUnknown AclResourcePatternType = iota
-	AclPatternAny
-	AclPatternMatch
-	AclPatternLiteral
-	AclPatternPrefixed
-)
-
-func (a *AclResourcePatternType) String() string {
-	mapping := map[AclResourcePatternType]string{
-		AclPatternUnknown:  "Unknown",
-		AclPatternAny:      "Any",
-		AclPatternMatch:    "Match",
-		AclPatternLiteral:  "Literal",
-		AclPatternPrefixed: "Prefixed",
-	}
-	s, ok := mapping[*a]
-	if !ok {
-		s = mapping[AclPatternUnknown]
-	}
-	return s
-}
-
-// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
-func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
-	return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text reprentation of the resource pattern type and converts it to an AclResourcePatternType
-func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
-	normalized := strings.ToLower(string(text))
-	mapping := map[string]AclResourcePatternType{
-		"unknown":  AclPatternUnknown,
-		"any":      AclPatternAny,
-		"match":    AclPatternMatch,
-		"literal":  AclPatternLiteral,
-		"prefixed": AclPatternPrefixed,
-	}
-
-	arpt, ok := mapping[normalized]
-	if !ok {
-		*a = AclPatternUnknown
-		return fmt.Errorf("no acl resource pattern with name %s", normalized)
-	}
-	*a = arpt
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
deleted file mode 100644
index a96af93..0000000
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package sarama
-
-// AddOffsetsToTxnRequest adds offsets to a transaction request
-type AddOffsetsToTxnRequest struct {
-	TransactionalID string
-	ProducerID      int64
-	ProducerEpoch   int16
-	GroupID         string
-}
-
-func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(a.TransactionalID); err != nil {
-		return err
-	}
-
-	pe.putInt64(a.ProducerID)
-
-	pe.putInt16(a.ProducerEpoch)
-
-	if err := pe.putString(a.GroupID); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
-	if a.TransactionalID, err = pd.getString(); err != nil {
-		return err
-	}
-	if a.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-	if a.GroupID, err = pd.getString(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (a *AddOffsetsToTxnRequest) key() int16 {
-	return 25
-}
-
-func (a *AddOffsetsToTxnRequest) version() int16 {
-	return 0
-}
-
-func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
deleted file mode 100644
index bb61973..0000000
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-// AddOffsetsToTxnResponse is a response type for adding offsets to txns
-type AddOffsetsToTxnResponse struct {
-	ThrottleTime time.Duration
-	Err          KError
-}
-
-func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-	pe.putInt16(int16(a.Err))
-	return nil
-}
-
-func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	a.Err = KError(kerr)
-
-	return nil
-}
-
-func (a *AddOffsetsToTxnResponse) key() int16 {
-	return 25
-}
-
-func (a *AddOffsetsToTxnResponse) version() int16 {
-	return 0
-}
-
-func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
deleted file mode 100644
index 57ecf64..0000000
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sarama
-
-// AddPartitionsToTxnRequest is an add partitions request
-type AddPartitionsToTxnRequest struct {
-	TransactionalID string
-	ProducerID      int64
-	ProducerEpoch   int16
-	TopicPartitions map[string][]int32
-}
-
-func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(a.TransactionalID); err != nil {
-		return err
-	}
-	pe.putInt64(a.ProducerID)
-	pe.putInt16(a.ProducerEpoch)
-
-	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
-		return err
-	}
-	for topic, partitions := range a.TopicPartitions {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putInt32Array(partitions); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
-	if a.TransactionalID, err = pd.getString(); err != nil {
-		return err
-	}
-	if a.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.TopicPartitions = make(map[string][]int32)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		partitions, err := pd.getInt32Array()
-		if err != nil {
-			return err
-		}
-
-		a.TopicPartitions[topic] = partitions
-	}
-
-	return nil
-}
-
-func (a *AddPartitionsToTxnRequest) key() int16 {
-	return 24
-}
-
-func (a *AddPartitionsToTxnRequest) version() int16 {
-	return 0
-}
-
-func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
deleted file mode 100644
index 0989565..0000000
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-// AddPartitionsToTxnResponse is a response type containing per-partition errors for a transaction
-type AddPartitionsToTxnResponse struct {
-	ThrottleTime time.Duration
-	Errors       map[string][]*PartitionError
-}
-
-func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-	if err := pe.putArrayLength(len(a.Errors)); err != nil {
-		return err
-	}
-
-	for topic, e := range a.Errors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(e)); err != nil {
-			return err
-		}
-		for _, partitionError := range e {
-			if err := partitionError.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Errors = make(map[string][]*PartitionError)
-
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		m, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		a.Errors[topic] = make([]*PartitionError, m)
-
-		for j := 0; j < m; j++ {
-			a.Errors[topic][j] = new(PartitionError)
-			if err := a.Errors[topic][j].decode(pd, version); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (a *AddPartitionsToTxnResponse) key() int16 {
-	return 24
-}
-
-func (a *AddPartitionsToTxnResponse) version() int16 {
-	return 0
-}
-
-func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-// PartitionError is a partition error type
-type PartitionError struct {
-	Partition int32
-	Err       KError
-}
-
-func (p *PartitionError) encode(pe packetEncoder) error {
-	pe.putInt32(p.Partition)
-	pe.putInt16(int16(p.Err))
-	return nil
-}
-
-func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
-	if p.Partition, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	p.Err = KError(kerr)
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
deleted file mode 100644
index abe18b1..0000000
--- a/vendor/github.com/Shopify/sarama/admin.go
+++ /dev/null
@@ -1,1005 +0,0 @@
-package sarama
-
-import (
-	"errors"
-	"fmt"
-	"math/rand"
-	"strconv"
-	"sync"
-	"time"
-)
-
-// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
-// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
-// Methods with stricter requirements will specify the minimum broker version required.
-// You MUST call Close() on a client to avoid leaks
-type ClusterAdmin interface {
-	// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
-	// It may take several seconds after CreateTopic returns success for all the brokers
-	// to become aware that the topic has been created. During this time, listTopics
-	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
-	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
-
-	// List the topics available in the cluster with the default options.
-	ListTopics() (map[string]TopicDetail, error)
-
-	// Describe some topics in the cluster.
-	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
-
-	// Delete a topic. It may take several seconds after DeleteTopic returns success
-	// for all the brokers to become aware that the topic is gone.
-	// During this time, listTopics may continue to return information about the deleted topic.
-	// If delete.topic.enable is false on the brokers, deleteTopic will mark
-	// the topic for deletion, but not actually delete it.
-	// This operation is supported by brokers with version 0.10.1.0 or higher.
-	DeleteTopic(topic string) error
-
-	// Increase the number of partitions of the topics according to the corresponding values.
-	// If partitions are increased for a topic that has a key, the partition logic or ordering of
-	// the messages will be affected. It may take several seconds after this method returns
-	// success for all the brokers to become aware that the partitions have been created.
-	// During this time, ClusterAdmin#describeTopics may not return information about the
-	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
-	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
-
-	// Alter the replica assignment for partitions.
-	// This operation is supported by brokers with version 2.4.0.0 or higher.
-	AlterPartitionReassignments(topic string, assignment [][]int32) error
-
-	// Provides info on ongoing partitions replica reassignments.
-	// This operation is supported by brokers with version 2.4.0.0 or higher.
-	ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
-
-	// Delete records whose offset is smaller than the given offset of the corresponding partition.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
-
-	// Get the configuration for the specified resources.
-	// The returned configuration includes default values, and Default being true
-	// can be used to distinguish them from user-supplied values.
-	// Config entries where ReadOnly is true cannot be updated.
-	// The value of config entries where Sensitive is true is always nil so
-	// sensitive information is not disclosed.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
-
-	// Update the configuration for the specified resources with the default options.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	// The resources with their configs (topic is the only resource type with configs
-	// that can be updated currently). Updates are not transactional, so they may succeed
-	// for some resources while failing for others. The configs for a particular resource are updated atomically.
-	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
-
-	// Creates access control lists (ACLs) which are bound to specific resources.
-	// This operation is not transactional so it may succeed for some ACLs while fail for others.
-	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
-	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
-	CreateACL(resource Resource, acl Acl) error
-
-	// Lists access control lists (ACLs) according to the supplied filter.
-	// It may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	ListAcls(filter AclFilter) ([]ResourceAcls, error)
-
-	// Deletes access control lists (ACLs) according to the supplied filters.
-	// This operation is not transactional so it may succeed for some ACLs while fail for others.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
-
-	// List the consumer groups available in the cluster.
-	ListConsumerGroups() (map[string]string, error)
-
-	// Describe the given consumer groups.
-	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
-
-	// List the consumer group offsets available in the cluster.
-	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
-
-	// Delete a consumer group.
-	DeleteConsumerGroup(group string) error
-
-	// Get information about the nodes in the cluster
-	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
-
-	// Get information about all log directories on the given set of brokers
-	DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
-
-	// Get information about SCRAM users
-	DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
-
-	// Delete SCRAM users
-	DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
-
-	// Upsert SCRAM users
-	UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
-
-	// Close shuts down the admin and closes underlying client.
-	Close() error
-}
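// Editorial sketch, not part of the original source: typical ClusterAdmin usage.
// The admin owns the underlying client, so Close() must be called to avoid leaks.
// The broker address, topic name and chosen versions below are placeholders.
//
//	config := NewConfig()
//	config.Version = V1_0_0_0 // match the brokers you are talking to
//	admin, err := NewClusterAdmin([]string{"localhost:9092"}, config)
//	if err != nil {
//		panic(err)
//	}
//	defer admin.Close()
//
//	err = admin.CreateTopic("example-topic", &TopicDetail{
//		NumPartitions:     3,
//		ReplicationFactor: 1,
//	}, false)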
-
-type clusterAdmin struct {
-	client Client
-	conf   *Config
-}
-
-// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
-func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
-	client, err := NewClient(addrs, conf)
-	if err != nil {
-		return nil, err
-	}
-	return NewClusterAdminFromClient(client)
-}
-
-// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
-// Note that underlying client will also be closed on admin's Close() call.
-func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
-	// make sure we can retrieve the controller
-	_, err := client.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	ca := &clusterAdmin{
-		client: client,
-		conf:   client.Config(),
-	}
-	return ca, nil
-}
-
-func (ca *clusterAdmin) Close() error {
-	return ca.client.Close()
-}
-
-func (ca *clusterAdmin) Controller() (*Broker, error) {
-	return ca.client.Controller()
-}
-
-func (ca *clusterAdmin) refreshController() (*Broker, error) {
-	return ca.client.RefreshController()
-}
-
-// isErrNoController returns `true` if the given error type unwraps to an
-// `ErrNotController` response from Kafka
-func isErrNoController(err error) bool {
-	switch e := err.(type) {
-	case *TopicError:
-		return e.Err == ErrNotController
-	case *TopicPartitionError:
-		return e.Err == ErrNotController
-	case KError:
-		return e == ErrNotController
-	}
-	return false
-}
-
-// retryOnError will repeatedly call the given (error-returning) func in the
-// case that its response is non-nil and retryable (as determined by the
-// provided retryable func) up to the maximum number of tries permitted by
-// the admin client configuration
-func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error {
-	var err error
-	for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
-		err = fn()
-		if err == nil || !retryable(err) {
-			return err
-		}
-		Logger.Printf(
-			"admin/request retrying after %dms... (%d attempts remaining)\n",
-			ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
-		time.Sleep(ca.conf.Admin.Retry.Backoff)
-		continue
-	}
-	return err
-}
-
-func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-
-	if detail == nil {
-		return errors.New("you must specify topic details")
-	}
-
-	topicDetails := make(map[string]*TopicDetail)
-	topicDetails[topic] = detail
-
-	request := &CreateTopicsRequest{
-		TopicDetails: topicDetails,
-		ValidateOnly: validateOnly,
-		Timeout:      ca.conf.Admin.Timeout,
-	}
-
-	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
-		request.Version = 1
-	}
-	if ca.conf.Version.IsAtLeast(V1_0_0_0) {
-		request.Version = 2
-	}
-
-	return ca.retryOnError(isErrNoController, func() error {
-		b, err := ca.Controller()
-		if err != nil {
-			return err
-		}
-
-		rsp, err := b.CreateTopics(request)
-		if err != nil {
-			return err
-		}
-
-		topicErr, ok := rsp.TopicErrors[topic]
-		if !ok {
-			return ErrIncompleteResponse
-		}
-
-		if topicErr.Err != ErrNoError {
-			if topicErr.Err == ErrNotController {
-				_, _ = ca.refreshController()
-			}
-			return topicErr
-		}
-
-		return nil
-	})
-}
-
-func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
-	controller, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	request := &MetadataRequest{
-		Topics:                 topics,
-		AllowAutoTopicCreation: false,
-	}
-
-	if ca.conf.Version.IsAtLeast(V1_0_0_0) {
-		request.Version = 5
-	} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
-		request.Version = 4
-	}
-
-	response, err := controller.GetMetadata(request)
-	if err != nil {
-		return nil, err
-	}
-	return response.Topics, nil
-}
-
-func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
-	controller, err := ca.Controller()
-	if err != nil {
-		return nil, int32(0), err
-	}
-
-	request := &MetadataRequest{
-		Topics: []string{},
-	}
-
-	if ca.conf.Version.IsAtLeast(V0_10_0_0) {
-		request.Version = 1
-	}
-
-	response, err := controller.GetMetadata(request)
-	if err != nil {
-		return nil, int32(0), err
-	}
-
-	return response.Brokers, response.ControllerID, nil
-}
-
-func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
-	brokers := ca.client.Brokers()
-	for _, b := range brokers {
-		if b.ID() == id {
-			return b, nil
-		}
-	}
-	return nil, fmt.Errorf("could not find broker id %d", id)
-}
-
-func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
-	brokers := ca.client.Brokers()
-	if len(brokers) > 0 {
-		index := rand.Intn(len(brokers))
-		return brokers[index], nil
-	}
-	return nil, errors.New("no available broker")
-}
-
-func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
-	// In order to build TopicDetails we need to first get the list of all
-	// topics using a MetadataRequest and then get their configs using a
-	// DescribeConfigsRequest request. To avoid sending many requests to the
-	// broker, we use a single DescribeConfigsRequest.
-
-	// Send the all-topic MetadataRequest
-	b, err := ca.findAnyBroker()
-	if err != nil {
-		return nil, err
-	}
-	_ = b.Open(ca.client.Config())
-
-	metadataReq := &MetadataRequest{}
-	metadataResp, err := b.GetMetadata(metadataReq)
-	if err != nil {
-		return nil, err
-	}
-
-	topicsDetailsMap := make(map[string]TopicDetail)
-
-	var describeConfigsResources []*ConfigResource
-
-	for _, topic := range metadataResp.Topics {
-		topicDetails := TopicDetail{
-			NumPartitions: int32(len(topic.Partitions)),
-		}
-		if len(topic.Partitions) > 0 {
-			topicDetails.ReplicaAssignment = map[int32][]int32{}
-			for _, partition := range topic.Partitions {
-				topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
-			}
-			topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
-		}
-		topicsDetailsMap[topic.Name] = topicDetails
-
-		// we populate the resources we want to describe from the MetadataResponse
-		topicResource := ConfigResource{
-			Type: TopicResource,
-			Name: topic.Name,
-		}
-		describeConfigsResources = append(describeConfigsResources, &topicResource)
-	}
-
-	// Send the DescribeConfigsRequest
-	describeConfigsReq := &DescribeConfigsRequest{
-		Resources: describeConfigsResources,
-	}
-
-	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
-		describeConfigsReq.Version = 1
-	}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		describeConfigsReq.Version = 2
-	}
-
-	describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, resource := range describeConfigsResp.Resources {
-		topicDetails := topicsDetailsMap[resource.Name]
-		topicDetails.ConfigEntries = make(map[string]*string)
-
-		for _, entry := range resource.Configs {
-			// only include non-default non-sensitive config
-			// (don't actually think topic config will ever be sensitive)
-			if entry.Default || entry.Sensitive {
-				continue
-			}
-			topicDetails.ConfigEntries[entry.Name] = &entry.Value
-		}
-
-		topicsDetailsMap[resource.Name] = topicDetails
-	}
-
-	return topicsDetailsMap, nil
-}
-
-func (ca *clusterAdmin) DeleteTopic(topic string) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-
-	request := &DeleteTopicsRequest{
-		Topics:  []string{topic},
-		Timeout: ca.conf.Admin.Timeout,
-	}
-
-	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
-		request.Version = 1
-	}
-
-	return ca.retryOnError(isErrNoController, func() error {
-		b, err := ca.Controller()
-		if err != nil {
-			return err
-		}
-
-		rsp, err := b.DeleteTopics(request)
-		if err != nil {
-			return err
-		}
-
-		topicErr, ok := rsp.TopicErrorCodes[topic]
-		if !ok {
-			return ErrIncompleteResponse
-		}
-
-		if topicErr != ErrNoError {
-			if topicErr == ErrNotController {
-				_, _ = ca.refreshController()
-			}
-			return topicErr
-		}
-
-		return nil
-	})
-}
-
-func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-
-	topicPartitions := make(map[string]*TopicPartition)
-	topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
-
-	request := &CreatePartitionsRequest{
-		TopicPartitions: topicPartitions,
-		Timeout:         ca.conf.Admin.Timeout,
-	}
-
-	return ca.retryOnError(isErrNoController, func() error {
-		b, err := ca.Controller()
-		if err != nil {
-			return err
-		}
-
-		rsp, err := b.CreatePartitions(request)
-		if err != nil {
-			return err
-		}
-
-		topicErr, ok := rsp.TopicPartitionErrors[topic]
-		if !ok {
-			return ErrIncompleteResponse
-		}
-
-		if topicErr.Err != ErrNoError {
-			if topicErr.Err == ErrNotController {
-				_, _ = ca.refreshController()
-			}
-			return topicErr
-		}
-
-		return nil
-	})
-}
-
-func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-
-	request := &AlterPartitionReassignmentsRequest{
-		TimeoutMs: int32(60000),
-		Version:   int16(0),
-	}
-
-	for i := 0; i < len(assignment); i++ {
-		request.AddBlock(topic, int32(i), assignment[i])
-	}
-
-	return ca.retryOnError(isErrNoController, func() error {
-		b, err := ca.Controller()
-		if err != nil {
-			return err
-		}
-
-		errs := make([]error, 0)
-
-		rsp, err := b.AlterPartitionReassignments(request)
-
-		if err != nil {
-			errs = append(errs, err)
-		} else {
-			if rsp.ErrorCode > 0 {
-				errs = append(errs, errors.New(rsp.ErrorCode.Error()))
-			}
-
-			for topic, topicErrors := range rsp.Errors {
-				for partition, partitionError := range topicErrors {
-					if partitionError.errorCode != ErrNoError {
-						errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error())
-						errs = append(errs, errors.New(errStr))
-					}
-				}
-			}
-		}
-
-		if len(errs) > 0 {
-			return ErrReassignPartitions{MultiError{&errs}}
-		}
-
-		return nil
-	})
-}
-
-func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
-	if topic == "" {
-		return nil, ErrInvalidTopic
-	}
-
-	request := &ListPartitionReassignmentsRequest{
-		TimeoutMs: int32(60000),
-		Version:   int16(0),
-	}
-
-	request.AddBlock(topic, partitions)
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-	_ = b.Open(ca.client.Config())
-
-	rsp, err := b.ListPartitionReassignments(request)
-
-	if err == nil && rsp != nil {
-		return rsp.TopicStatus, nil
-	}
-	return nil, err
-}
-
-func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-	partitionPerBroker := make(map[*Broker][]int32)
-	for partition := range partitionOffsets {
-		broker, err := ca.client.Leader(topic, partition)
-		if err != nil {
-			return err
-		}
-		partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
-	}
-	errs := make([]error, 0)
-	for broker, partitions := range partitionPerBroker {
-		topics := make(map[string]*DeleteRecordsRequestTopic)
-		recordsToDelete := make(map[int32]int64)
-		for _, p := range partitions {
-			recordsToDelete[p] = partitionOffsets[p]
-		}
-		topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
-		request := &DeleteRecordsRequest{
-			Topics:  topics,
-			Timeout: ca.conf.Admin.Timeout,
-		}
-
-		rsp, err := broker.DeleteRecords(request)
-		if err != nil {
-			errs = append(errs, err)
-		} else {
-			deleteRecordsResponseTopic, ok := rsp.Topics[topic]
-			if !ok {
-				errs = append(errs, ErrIncompleteResponse)
-			} else {
-				for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
-					if deleteRecordsResponsePartition.Err != ErrNoError {
-						errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
-					}
-				}
-			}
-		}
-	}
-	if len(errs) > 0 {
-		return ErrDeleteRecords{MultiError{&errs}}
-	}
-	// TODO: since we are dealing with several partitions it would be better to return a slice of errors,
-	// one per partition, instead of a single error
-	return nil
-}
-
-// Returns a bool indicating whether the resource request needs to go to a
-// specific broker
-func dependsOnSpecificNode(resource ConfigResource) bool {
-	return (resource.Type == BrokerResource && resource.Name != "") ||
-		resource.Type == BrokerLoggerResource
-}
-
-func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
-	var entries []ConfigEntry
-	var resources []*ConfigResource
-	resources = append(resources, &resource)
-
-	request := &DescribeConfigsRequest{
-		Resources: resources,
-	}
-
-	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
-		request.Version = 1
-	}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		request.Version = 2
-	}
-
-	var (
-		b   *Broker
-		err error
-	)
-
-	// DescribeConfig of broker/broker logger must be sent to the broker in question
-	if dependsOnSpecificNode(resource) {
-		var id int64
-		id, err = strconv.ParseInt(resource.Name, 10, 32)
-		if err != nil {
-			return nil, err
-		}
-		b, err = ca.findBroker(int32(id))
-	} else {
-		b, err = ca.findAnyBroker()
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	_ = b.Open(ca.client.Config())
-	rsp, err := b.DescribeConfigs(request)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, rspResource := range rsp.Resources {
-		if rspResource.Name == resource.Name {
-			if rspResource.ErrorMsg != "" {
-				return nil, errors.New(rspResource.ErrorMsg)
-			}
-			if rspResource.ErrorCode != 0 {
-				return nil, KError(rspResource.ErrorCode)
-			}
-			for _, cfgEntry := range rspResource.Configs {
-				entries = append(entries, *cfgEntry)
-			}
-		}
-	}
-	return entries, nil
-}
-
-func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
-	var resources []*AlterConfigsResource
-	resources = append(resources, &AlterConfigsResource{
-		Type:          resourceType,
-		Name:          name,
-		ConfigEntries: entries,
-	})
-
-	request := &AlterConfigsRequest{
-		Resources:    resources,
-		ValidateOnly: validateOnly,
-	}
-
-	var (
-		b   *Broker
-		err error
-	)
-
-	// AlterConfig of broker/broker logger must be sent to the broker in question
-	if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
-		var id int64
-		id, err = strconv.ParseInt(name, 10, 32)
-		if err != nil {
-			return err
-		}
-		b, err = ca.findBroker(int32(id))
-	} else {
-		b, err = ca.findAnyBroker()
-	}
-	if err != nil {
-		return err
-	}
-
-	_ = b.Open(ca.client.Config())
-	rsp, err := b.AlterConfigs(request)
-	if err != nil {
-		return err
-	}
-
-	for _, rspResource := range rsp.Resources {
-		if rspResource.Name == name {
-			if rspResource.ErrorMsg != "" {
-				return errors.New(rspResource.ErrorMsg)
-			}
-			if rspResource.ErrorCode != 0 {
-				return KError(rspResource.ErrorCode)
-			}
-		}
-	}
-	return nil
-}
-
-func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
-	var acls []*AclCreation
-	acls = append(acls, &AclCreation{resource, acl})
-	request := &CreateAclsRequest{AclCreations: acls}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		request.Version = 1
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return err
-	}
-
-	_, err = b.CreateAcls(request)
-	return err
-}
-
-func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
-	request := &DescribeAclsRequest{AclFilter: filter}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		request.Version = 1
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	rsp, err := b.DescribeAcls(request)
-	if err != nil {
-		return nil, err
-	}
-
-	var lAcls []ResourceAcls
-	for _, rAcl := range rsp.ResourceAcls {
-		lAcls = append(lAcls, *rAcl)
-	}
-	return lAcls, nil
-}
-
-func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
-	var filters []*AclFilter
-	filters = append(filters, &filter)
-	request := &DeleteAclsRequest{Filters: filters}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		request.Version = 1
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	rsp, err := b.DeleteAcls(request)
-	if err != nil {
-		return nil, err
-	}
-
-	var mAcls []MatchingAcl
-	for _, fr := range rsp.FilterResponses {
-		for _, mACL := range fr.MatchingAcls {
-			mAcls = append(mAcls, *mACL)
-		}
-	}
-	return mAcls, nil
-}
-
-func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
-	groupsPerBroker := make(map[*Broker][]string)
-
-	for _, group := range groups {
-		controller, err := ca.client.Coordinator(group)
-		if err != nil {
-			return nil, err
-		}
-		groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
-	}
-
-	for broker, brokerGroups := range groupsPerBroker {
-		response, err := broker.DescribeGroups(&DescribeGroupsRequest{
-			Groups: brokerGroups,
-		})
-		if err != nil {
-			return nil, err
-		}
-
-		result = append(result, response.Groups...)
-	}
-	return result, nil
-}
-
-func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
-	allGroups = make(map[string]string)
-
-	// Query brokers in parallel, since we have to query *all* brokers
-	brokers := ca.client.Brokers()
-	groupMaps := make(chan map[string]string, len(brokers))
-	errChan := make(chan error, len(brokers))
-	wg := sync.WaitGroup{}
-
-	for _, b := range brokers {
-		wg.Add(1)
-		go func(b *Broker, conf *Config) {
-			defer wg.Done()
-			_ = b.Open(conf) // Ensure that broker is opened
-
-			response, err := b.ListGroups(&ListGroupsRequest{})
-			if err != nil {
-				errChan <- err
-				return
-			}
-
-			groups := make(map[string]string)
-			for group, typ := range response.Groups {
-				groups[group] = typ
-			}
-
-			groupMaps <- groups
-		}(b, ca.conf)
-	}
-
-	wg.Wait()
-	close(groupMaps)
-	close(errChan)
-
-	for groupMap := range groupMaps {
-		for group, protocolType := range groupMap {
-			allGroups[group] = protocolType
-		}
-	}
-
-	// Intentionally return only the first error for simplicity
-	err = <-errChan
-	return
-}
-
-func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
-	coordinator, err := ca.client.Coordinator(group)
-	if err != nil {
-		return nil, err
-	}
-
-	request := &OffsetFetchRequest{
-		ConsumerGroup: group,
-		partitions:    topicPartitions,
-	}
-
-	if ca.conf.Version.IsAtLeast(V0_10_2_0) {
-		request.Version = 2
-	} else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
-		request.Version = 1
-	}
-
-	return coordinator.FetchOffset(request)
-}
-
-func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
-	coordinator, err := ca.client.Coordinator(group)
-	if err != nil {
-		return err
-	}
-
-	request := &DeleteGroupsRequest{
-		Groups: []string{group},
-	}
-
-	resp, err := coordinator.DeleteGroups(request)
-	if err != nil {
-		return err
-	}
-
-	groupErr, ok := resp.GroupErrorCodes[group]
-	if !ok {
-		return ErrIncompleteResponse
-	}
-
-	if groupErr != ErrNoError {
-		return groupErr
-	}
-
-	return nil
-}
-
-func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
-	allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
-
-	// Query brokers in parallel, since we may have to query multiple brokers
-	logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
-	errChan := make(chan error, len(brokerIds))
-	wg := sync.WaitGroup{}
-
-	for _, b := range brokerIds {
-		broker, err := ca.findBroker(b)
-		if err != nil {
-			Logger.Printf("Unable to find broker with ID = %v\n", b)
-			continue
-		}
-		// only count brokers that will actually be queried, otherwise wg.Wait would block forever
-		wg.Add(1)
-		go func(b *Broker, conf *Config) {
-			defer wg.Done()
-			_ = b.Open(conf) // Ensure that broker is opened
-
-			response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
-			if err != nil {
-				errChan <- err
-				return
-			}
-			logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
-			logDirs[b.ID()] = response.LogDirs
-			logDirsMaps <- logDirs
-		}(broker, ca.conf)
-	}
-
-	wg.Wait()
-	close(logDirsMaps)
-	close(errChan)
-
-	for logDirsMap := range logDirsMaps {
-		for id, logDirs := range logDirsMap {
-			allLogDirs[id] = logDirs
-		}
-	}
-
-	// Intentionally return only the first error for simplicity
-	err = <-errChan
-	return
-}
-
-func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
-	req := &DescribeUserScramCredentialsRequest{}
-	for _, u := range users {
-		req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
-			Name: u,
-		})
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	rsp, err := b.DescribeUserScramCredentials(req)
-	if err != nil {
-		return nil, err
-	}
-
-	return rsp.Results, nil
-}
-
-func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
-	res, err := ca.AlterUserScramCredentials(upsert, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
-
-func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
-	res, err := ca.AlterUserScramCredentials(nil, delete)
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
-
-func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
-	req := &AlterUserScramCredentialsRequest{
-		Deletions:  d,
-		Upsertions: u,
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	rsp, err := b.AlterUserScramCredentials(req)
-	if err != nil {
-		return nil, err
-	}
-
-	return rsp.Results, nil
-}
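
For reference, the ClusterAdmin methods above are normally reached through sarama.NewClusterAdmin. A minimal usage sketch follows; the broker address, Kafka version, and the github.com/IBM/sarama import path are assumptions (the removed Shopify fork exposed the same API).

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V2_0_0_0 // assumed broker version; use the version your cluster actually runs

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatalf("failed to create cluster admin: %v", err)
	}
	defer admin.Close()

	// ListTopics combines a MetadataRequest with a DescribeConfigsRequest, as shown above.
	topics, err := admin.ListTopics()
	if err != nil {
		log.Fatalf("failed to list topics: %v", err)
	}
	for name, detail := range topics {
		log.Printf("topic %s: %d partitions, replication factor %d", name, detail.NumPartitions, detail.ReplicationFactor)
	}
}
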
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
deleted file mode 100644
index 8b94b1f..0000000
--- a/vendor/github.com/Shopify/sarama/alter_configs_request.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package sarama
-
-// AlterConfigsRequest is an alter config request type
-type AlterConfigsRequest struct {
-	Resources    []*AlterConfigsResource
-	ValidateOnly bool
-}
-
-// AlterConfigsResource is an alter config resource type
-type AlterConfigsResource struct {
-	Type          ConfigResourceType
-	Name          string
-	ConfigEntries map[string]*string
-}
-
-func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(a.Resources)); err != nil {
-		return err
-	}
-
-	for _, r := range a.Resources {
-		if err := r.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	pe.putBool(a.ValidateOnly)
-	return nil
-}
-
-func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
-	resourceCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Resources = make([]*AlterConfigsResource, resourceCount)
-	for i := range a.Resources {
-		r := &AlterConfigsResource{}
-		err = r.decode(pd, version)
-		if err != nil {
-			return err
-		}
-		a.Resources[i] = r
-	}
-
-	validateOnly, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-
-	a.ValidateOnly = validateOnly
-
-	return nil
-}
-
-func (a *AlterConfigsResource) encode(pe packetEncoder) error {
-	pe.putInt8(int8(a.Type))
-
-	if err := pe.putString(a.Name); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
-		return err
-	}
-	for configKey, configValue := range a.ConfigEntries {
-		if err := pe.putString(configKey); err != nil {
-			return err
-		}
-		if err := pe.putNullableString(configValue); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	a.Type = ConfigResourceType(t)
-
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	a.Name = name
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		a.ConfigEntries = make(map[string]*string, n)
-		for i := 0; i < n; i++ {
-			configKey, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
-				return err
-			}
-		}
-	}
-	return err
-}
-
-func (a *AlterConfigsRequest) key() int16 {
-	return 33
-}
-
-func (a *AlterConfigsRequest) version() int16 {
-	return 0
-}
-
-func (a *AlterConfigsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
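
The AlterConfigsRequest above is what ClusterAdmin.AlterConfig builds internally. A small sketch of setting a topic config through that admin method, assuming an already-constructed admin client; the topic name and retention value are placeholders.

package main

import "github.com/IBM/sarama"

// setRetention applies a retention.ms override to a topic; config values travel as *string.
func setRetention(admin sarama.ClusterAdmin) error {
	retention := "86400000" // placeholder: 24h in milliseconds
	entries := map[string]*string{"retention.ms": &retention}
	// validateOnly=false applies the change; true would only validate it on the broker.
	return admin.AlterConfig(sarama.TopicResource, "my-topic", entries, false)
}
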
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
deleted file mode 100644
index cfb6369..0000000
--- a/vendor/github.com/Shopify/sarama/alter_configs_response.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package sarama
-
-import "time"
-
-// AlterConfigsResponse is a response type for alter config
-type AlterConfigsResponse struct {
-	ThrottleTime time.Duration
-	Resources    []*AlterConfigsResourceResponse
-}
-
-// AlterConfigsResourceResponse is a response type for alter config resource
-type AlterConfigsResourceResponse struct {
-	ErrorCode int16
-	ErrorMsg  string
-	Type      ConfigResourceType
-	Name      string
-}
-
-func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(a.Resources)); err != nil {
-		return err
-	}
-
-	for _, v := range a.Resources {
-		if err := v.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	responseCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
-
-	for i := range a.Resources {
-		a.Resources[i] = new(AlterConfigsResourceResponse)
-
-		if err := a.Resources[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
-	pe.putInt16(a.ErrorCode)
-	err := pe.putString(a.ErrorMsg)
-	if err != nil {
-		return err
-	}
-	pe.putInt8(int8(a.Type))
-	err = pe.putString(a.Name)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
-	errCode, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	a.ErrorCode = errCode
-
-	e, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	a.ErrorMsg = e
-
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	a.Type = ConfigResourceType(t)
-
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	a.Name = name
-
-	return nil
-}
-
-func (a *AlterConfigsResponse) key() int16 {
-	return 32
-}
-
-func (a *AlterConfigsResponse) version() int16 {
-	return 0
-}
-
-func (a *AlterConfigsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
deleted file mode 100644
index f0a2f9d..0000000
--- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-type alterPartitionReassignmentsBlock struct {
-	replicas []int32
-}
-
-func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
-	if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
-		return err
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
-	if b.replicas, err = pd.getCompactInt32Array(); err != nil {
-		return err
-	}
-	return nil
-}
-
-type AlterPartitionReassignmentsRequest struct {
-	TimeoutMs int32
-	blocks    map[string]map[int32]*alterPartitionReassignmentsBlock
-	Version   int16
-}
-
-func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
-	pe.putInt32(r.TimeoutMs)
-
-	pe.putCompactArrayLength(len(r.blocks))
-
-	for topic, partitions := range r.blocks {
-		if err := pe.putCompactString(topic); err != nil {
-			return err
-		}
-		pe.putCompactArrayLength(len(partitions))
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err := block.encode(pe); err != nil {
-				return err
-			}
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.TimeoutMs, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	topicCount, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount > 0 {
-		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
-		for i := 0; i < topicCount; i++ {
-			topic, err := pd.getCompactString()
-			if err != nil {
-				return err
-			}
-			partitionCount, err := pd.getCompactArrayLength()
-			if err != nil {
-				return err
-			}
-			r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
-			for j := 0; j < partitionCount; j++ {
-				partition, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-				block := &alterPartitionReassignmentsBlock{}
-				if err := block.decode(pd); err != nil {
-					return err
-				}
-				r.blocks[topic][partition] = block
-
-				if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-					return err
-				}
-			}
-			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return
-}
-
-func (r *AlterPartitionReassignmentsRequest) key() int16 {
-	return 45
-}
-
-func (r *AlterPartitionReassignmentsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
-	return 2
-}
-
-func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
-	return V2_4_0_0
-}
-
-func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
-	}
-
-	r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
-}
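
AddBlock above is driven by ClusterAdmin.AlterPartitionReassignments, which adds one block per partition index. A hedged sketch, with placeholder topic name and broker IDs:

package main

import "github.com/IBM/sarama"

// moveReplicas reassigns the first two partitions of a topic onto new broker sets.
// The admin method builds an AlterPartitionReassignmentsRequest and calls AddBlock
// once per partition index, as in the request type above.
func moveReplicas(admin sarama.ClusterAdmin) error {
	assignment := [][]int32{
		{1, 2, 3}, // partition 0 -> brokers 1, 2, 3
		{2, 3, 4}, // partition 1 -> brokers 2, 3, 4
	}
	return admin.AlterPartitionReassignments("my-topic", assignment)
}
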
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
deleted file mode 100644
index b3f9a15..0000000
--- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package sarama
-
-type alterPartitionReassignmentsErrorBlock struct {
-	errorCode    KError
-	errorMessage *string
-}
-
-func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
-	pe.putInt16(int16(b.errorCode))
-	if err := pe.putNullableCompactString(b.errorMessage); err != nil {
-		return err
-	}
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
-	errorCode, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.errorCode = KError(errorCode)
-	b.errorMessage, err = pd.getCompactNullableString()
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return err
-}
-
-type AlterPartitionReassignmentsResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	ErrorCode      KError
-	ErrorMessage   *string
-	Errors         map[string]map[int32]*alterPartitionReassignmentsErrorBlock
-}
-
-func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
-	if r.Errors == nil {
-		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
-	}
-	partitions := r.Errors[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
-		r.Errors[topic] = partitions
-	}
-
-	partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
-}
-
-func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(r.ThrottleTimeMs)
-	pe.putInt16(int16(r.ErrorCode))
-	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
-		return err
-	}
-
-	pe.putCompactArrayLength(len(r.Errors))
-	for topic, partitions := range r.Errors {
-		if err := pe.putCompactString(topic); err != nil {
-			return err
-		}
-		pe.putCompactArrayLength(len(partitions))
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-
-			if err := block.encode(pe); err != nil {
-				return err
-			}
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.ErrorCode = KError(kerr)
-
-	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-		return err
-	}
-
-	numTopics, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if numTopics > 0 {
-		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
-		for i := 0; i < numTopics; i++ {
-			topic, err := pd.getCompactString()
-			if err != nil {
-				return err
-			}
-
-			ongoingPartitionReassignments, err := pd.getCompactArrayLength()
-			if err != nil {
-				return err
-			}
-
-			r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
-
-			for j := 0; j < ongoingPartitionReassignments; j++ {
-				partition, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-				block := &alterPartitionReassignmentsErrorBlock{}
-				if err := block.decode(pd); err != nil {
-					return err
-				}
-
-				r.Errors[topic][partition] = block
-			}
-			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *AlterPartitionReassignmentsResponse) key() int16 {
-	return 45
-}
-
-func (r *AlterPartitionReassignmentsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
-	return 1
-}
-
-func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
-	return V2_4_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
deleted file mode 100644
index 0530d89..0000000
--- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package sarama
-
-type AlterUserScramCredentialsRequest struct {
-	Version int16
-
-	// Deletions is the list of SCRAM credentials to remove
-	Deletions []AlterUserScramCredentialsDelete
-
-	// Upsertions is the list of SCRAM credentials to update or insert
-	Upsertions []AlterUserScramCredentialsUpsert
-}
-
-type AlterUserScramCredentialsDelete struct {
-	Name      string
-	Mechanism ScramMechanismType
-}
-
-type AlterUserScramCredentialsUpsert struct {
-	Name           string
-	Mechanism      ScramMechanismType
-	Iterations     int32
-	Salt           []byte
-	saltedPassword []byte
-
-	// This field is never transmitted over the wire
-	// @see: https://tools.ietf.org/html/rfc5802
-	Password []byte
-}
-
-func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error {
-	pe.putCompactArrayLength(len(r.Deletions))
-	for _, d := range r.Deletions {
-		if err := pe.putCompactString(d.Name); err != nil {
-			return err
-		}
-		pe.putInt8(int8(d.Mechanism))
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putCompactArrayLength(len(r.Upsertions))
-	for _, u := range r.Upsertions {
-		if err := pe.putCompactString(u.Name); err != nil {
-			return err
-		}
-		pe.putInt8(int8(u.Mechanism))
-		pe.putInt32(u.Iterations)
-
-		if err := pe.putCompactBytes(u.Salt); err != nil {
-			return err
-		}
-
-		// do not transmit the password over the wire
-		formatter := scramFormatter{mechanism: u.Mechanism}
-		salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations))
-		if err != nil {
-			return err
-		}
-
-		if err := pe.putCompactBytes(salted); err != nil {
-			return err
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
-	numDeletions, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions)
-	for i := 0; i < numDeletions; i++ {
-		r.Deletions[i] = AlterUserScramCredentialsDelete{}
-		if r.Deletions[i].Name, err = pd.getCompactString(); err != nil {
-			return err
-		}
-		mechanism, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		r.Deletions[i].Mechanism = ScramMechanismType(mechanism)
-		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	numUpsertions, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions)
-	for i := 0; i < numUpsertions; i++ {
-		r.Upsertions[i] = AlterUserScramCredentialsUpsert{}
-		if r.Upsertions[i].Name, err = pd.getCompactString(); err != nil {
-			return err
-		}
-		mechanism, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-
-		r.Upsertions[i].Mechanism = ScramMechanismType(mechanism)
-		if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil {
-			return err
-		}
-		if r.Upsertions[i].Salt, err = pd.getCompactBytes(); err != nil {
-			return err
-		}
-		if r.Upsertions[i].saltedPassword, err = pd.getCompactBytes(); err != nil {
-			return err
-		}
-		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (r *AlterUserScramCredentialsRequest) key() int16 {
-	return 51
-}
-
-func (r *AlterUserScramCredentialsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *AlterUserScramCredentialsRequest) headerVersion() int16 {
-	return 2
-}
-
-func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion {
-	return V2_7_0_0
-}
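
The request above never carries the raw password; encode derives the salted password from Password, Salt and Iterations. A sketch of creating a credential through ClusterAdmin.UpsertUserScramCredentials, with placeholder user name, password and iteration count:

package main

import (
	"crypto/rand"

	"github.com/IBM/sarama"
)

// upsertScramUser creates or updates a SCRAM credential. Only the plain Password is
// set here; the salted password is derived during encoding, as shown above.
func upsertScramUser(admin sarama.ClusterAdmin, mech sarama.ScramMechanismType) error {
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		return err
	}
	_, err := admin.UpsertUserScramCredentials([]sarama.AlterUserScramCredentialsUpsert{{
		Name:       "alice", // placeholder principal
		Mechanism:  mech,
		Iterations: 8192,
		Salt:       salt,
		Password:   []byte("secret"), // never sent raw; hashed with the salt before encoding
	}})
	return err
}
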
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
deleted file mode 100644
index 31e167b..0000000
--- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import "time"
-
-type AlterUserScramCredentialsResponse struct {
-	Version int16
-
-	ThrottleTime time.Duration
-
-	Results []*AlterUserScramCredentialsResult
-}
-
-type AlterUserScramCredentialsResult struct {
-	User string
-
-	ErrorCode    KError
-	ErrorMessage *string
-}
-
-func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-	pe.putCompactArrayLength(len(r.Results))
-
-	for _, u := range r.Results {
-		if err := pe.putCompactString(u.User); err != nil {
-			return err
-		}
-		pe.putInt16(int16(u.ErrorCode))
-		if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
-			return err
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	numResults, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if numResults > 0 {
-		r.Results = make([]*AlterUserScramCredentialsResult, numResults)
-		for i := 0; i < numResults; i++ {
-			r.Results[i] = &AlterUserScramCredentialsResult{}
-			if r.Results[i].User, err = pd.getCompactString(); err != nil {
-				return err
-			}
-
-			kerr, err := pd.getInt16()
-			if err != nil {
-				return err
-			}
-
-			r.Results[i].ErrorCode = KError(kerr)
-			if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-				return err
-			}
-			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (r *AlterUserScramCredentialsResponse) key() int16 {
-	return 51
-}
-
-func (r *AlterUserScramCredentialsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *AlterUserScramCredentialsResponse) headerVersion() int16 {
-	return 2
-}
-
-func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion {
-	return V2_7_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
deleted file mode 100644
index bee92c0..0000000
--- a/vendor/github.com/Shopify/sarama/api_versions_request.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package sarama
-
-// ApiVersionsRequest is an api versions request type
-type ApiVersionsRequest struct{}
-
-func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
-	return nil
-}
-
-func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
-	return nil
-}
-
-func (a *ApiVersionsRequest) key() int16 {
-	return 18
-}
-
-func (a *ApiVersionsRequest) version() int16 {
-	return 0
-}
-
-func (a *ApiVersionsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
-	return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
deleted file mode 100644
index 0e72e39..0000000
--- a/vendor/github.com/Shopify/sarama/api_versions_response.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package sarama
-
-// ApiVersionsResponseBlock is an api version response block type
-type ApiVersionsResponseBlock struct {
-	ApiKey     int16
-	MinVersion int16
-	MaxVersion int16
-}
-
-func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
-	pe.putInt16(b.ApiKey)
-	pe.putInt16(b.MinVersion)
-	pe.putInt16(b.MaxVersion)
-	return nil
-}
-
-func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
-	var err error
-
-	if b.ApiKey, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	if b.MinVersion, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	if b.MaxVersion, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ApiVersionsResponse is an api version response type
-type ApiVersionsResponse struct {
-	Err         KError
-	ApiVersions []*ApiVersionsResponseBlock
-}
-
-func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
-		return err
-	}
-	for _, apiVersion := range r.ApiVersions {
-		if err := apiVersion.encode(pe); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	numBlocks, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
-	for i := 0; i < numBlocks; i++ {
-		block := new(ApiVersionsResponseBlock)
-		if err := block.decode(pd); err != nil {
-			return err
-		}
-		r.ApiVersions[i] = block
-	}
-
-	return nil
-}
-
-func (r *ApiVersionsResponse) key() int16 {
-	return 18
-}
-
-func (r *ApiVersionsResponse) version() int16 {
-	return 0
-}
-
-func (a *ApiVersionsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
-	return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
deleted file mode 100644
index 5911f7b..0000000
--- a/vendor/github.com/Shopify/sarama/async_producer.go
+++ /dev/null
@@ -1,1161 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/eapache/go-resiliency/breaker"
-	"github.com/eapache/queue"
-)
-
-// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
-// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
-// and parses responses for errors. You must read from the Errors() channel or the
-// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
-// leaks: it will not be garbage-collected automatically when it passes out of
-// scope.
-type AsyncProducer interface {
-
-	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
-	// when both the Errors and Successes channels have been closed. When calling
-	// AsyncClose, you *must* continue to read from those channels in order to
-	// drain the results of any messages in flight.
-	AsyncClose()
-
-	// Close shuts down the producer and waits for any buffered messages to be
-	// flushed. You must call this function before a producer object passes out of
-	// scope, as it may otherwise leak memory. You must call this before calling
-	// Close on the underlying client.
-	Close() error
-
-	// Input is the input channel for the user to write messages to that they
-	// wish to send.
-	Input() chan<- *ProducerMessage
-
-	// Successes is the success output channel back to the user when Return.Successes is
-	// enabled. If Return.Successes is true, you MUST read from this channel or the
-	// Producer will deadlock. It is suggested that you send and read messages
-	// together in a single select statement.
-	Successes() <-chan *ProducerMessage
-
-	// Errors is the error output channel back to the user. You MUST read from this
-	// channel or the Producer will deadlock when the channel is full. Alternatively,
-	// you can set Producer.Return.Errors in your config to false, which prevents
-	// errors from being returned.
-	Errors() <-chan *ProducerError
-}
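
The channel contract above (always drain Successes and Errors, or the producer deadlocks) is easiest to honour with a single select. A hedged sketch, assuming Producer.Return.Successes was enabled on the config used to build the producer:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

// produceAll sends all messages without ever blocking on a full result channel:
// the send case and the reads from Successes/Errors share one select.
func produceAll(producer sarama.AsyncProducer, msgs []*sarama.ProducerMessage) (sent, failed int) {
	i := 0
	remaining := len(msgs)
	for remaining > 0 {
		// Only arm the send case while something is left to send;
		// a nil channel case never fires in a select.
		var input chan<- *sarama.ProducerMessage
		var next *sarama.ProducerMessage
		if i < len(msgs) {
			input = producer.Input()
			next = msgs[i]
		}
		select {
		case input <- next:
			i++
		case <-producer.Successes():
			sent++
			remaining--
		case err := <-producer.Errors():
			log.Printf("produce failed: %v", err)
			failed++
			remaining--
		}
	}
	return sent, failed
}
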
-
-// transactionManager keeps the state necessary to ensure idempotent production
-type transactionManager struct {
-	producerID      int64
-	producerEpoch   int16
-	sequenceNumbers map[string]int32
-	mutex           sync.Mutex
-}
-
-const (
-	noProducerID    = -1
-	noProducerEpoch = -1
-)
-
-func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
-	key := fmt.Sprintf("%s-%d", topic, partition)
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	sequence := t.sequenceNumbers[key]
-	t.sequenceNumbers[key] = sequence + 1
-	return sequence, t.producerEpoch
-}
-
-func (t *transactionManager) bumpEpoch() {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	t.producerEpoch++
-	for k := range t.sequenceNumbers {
-		t.sequenceNumbers[k] = 0
-	}
-}
-
-func (t *transactionManager) getProducerID() (int64, int16) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	return t.producerID, t.producerEpoch
-}
-
-func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
-	txnmgr := &transactionManager{
-		producerID:    noProducerID,
-		producerEpoch: noProducerEpoch,
-	}
-
-	if conf.Producer.Idempotent {
-		initProducerIDResponse, err := client.InitProducerID()
-		if err != nil {
-			return nil, err
-		}
-		txnmgr.producerID = initProducerIDResponse.ProducerID
-		txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
-		txnmgr.sequenceNumbers = make(map[string]int32)
-		txnmgr.mutex = sync.Mutex{}
-
-		Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
-	}
-
-	return txnmgr, nil
-}
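
newTransactionManager above only requests a producer ID when Producer.Idempotent is set. A sketch of a config that enables the idempotent path; the exact validation rules live in config.go, and the values below reflect the usual requirements (acks=all, a single in-flight request, broker version at least 0.11):

package main

import "github.com/IBM/sarama"

// idempotentConfig returns a config that exercises the idempotent-producer path above.
func idempotentConfig() *sarama.Config {
	conf := sarama.NewConfig()
	conf.Version = sarama.V0_11_0_0 // idempotence needs at least Kafka 0.11
	conf.Producer.Idempotent = true
	conf.Producer.RequiredAcks = sarama.WaitForAll
	conf.Producer.Retry.Max = 5
	conf.Net.MaxOpenRequests = 1 // sequence numbers only stay ordered with one in-flight request
	conf.Producer.Return.Successes = true
	return conf
}
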
-
-type asyncProducer struct {
-	client Client
-	conf   *Config
-
-	errors                    chan *ProducerError
-	input, successes, retries chan *ProducerMessage
-	inFlight                  sync.WaitGroup
-
-	brokers    map[*Broker]*brokerProducer
-	brokerRefs map[*brokerProducer]int
-	brokerLock sync.Mutex
-
-	txnmgr *transactionManager
-}
-
-// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
-func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
-	client, err := NewClient(addrs, conf)
-	if err != nil {
-		return nil, err
-	}
-	return newAsyncProducer(client)
-}
-
-// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
-	// For a client passed in by the caller, ensure we don't
-	// call Close() on it.
-	cli := &nopCloserClient{client}
-	return newAsyncProducer(cli)
-}
-
-func newAsyncProducer(client Client) (AsyncProducer, error) {
-	// Check that we are not dealing with a closed Client before processing any other arguments
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	txnmgr, err := newTransactionManager(client.Config(), client)
-	if err != nil {
-		return nil, err
-	}
-
-	p := &asyncProducer{
-		client:     client,
-		conf:       client.Config(),
-		errors:     make(chan *ProducerError),
-		input:      make(chan *ProducerMessage),
-		successes:  make(chan *ProducerMessage),
-		retries:    make(chan *ProducerMessage),
-		brokers:    make(map[*Broker]*brokerProducer),
-		brokerRefs: make(map[*brokerProducer]int),
-		txnmgr:     txnmgr,
-	}
-
-	// launch our singleton dispatchers
-	go withRecover(p.dispatcher)
-	go withRecover(p.retryHandler)
-
-	return p, nil
-}
-
-type flagSet int8
-
-const (
-	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
-	fin                          // final message from partitionProducer to brokerProducer and back
-	shutdown                     // start the shutdown process
-)
-
-// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
-type ProducerMessage struct {
-	Topic string // The Kafka topic for this message.
-	// The partitioning key for this message. Pre-existing Encoders include
-	// StringEncoder and ByteEncoder.
-	Key Encoder
-	// The actual message to store in Kafka. Pre-existing Encoders include
-	// StringEncoder and ByteEncoder.
-	Value Encoder
-
-	// The headers are key-value pairs that are transparently passed
-	// by Kafka between producers and consumers.
-	Headers []RecordHeader
-
-	// This field is used to hold arbitrary data you wish to include so it
-	// will be available when receiving on the Successes and Errors channels.
-	// Sarama completely ignores this field; it is only intended for
-	// pass-through data.
-	Metadata interface{}
-
-	// Below this point are filled in by the producer as the message is processed
-
-	// Offset is the offset of the message stored on the broker. This is only
-	// guaranteed to be defined if the message was successfully delivered and
-	// RequiredAcks is not NoResponse.
-	Offset int64
-	// Partition is the partition that the message was sent to. This is only
-	// guaranteed to be defined if the message was successfully delivered.
-	Partition int32
-	// Timestamp can vary in behaviour depending on broker configuration, being
-	// in either one of the CreateTime or LogAppendTime modes (default CreateTime),
-	// and requires a Kafka version of at least 0.10.0.
-	//
-	// When configured to CreateTime, the timestamp is specified by the producer
-	// either by explicitly setting this field, or when the message is added
-	// to a produce set.
-	//
-	// When configured to LogAppendTime, the timestamp is assigned to the message
-	// by the broker. This is only guaranteed to be defined if the message was
-	// successfully delivered and RequiredAcks is not NoResponse.
-	Timestamp time.Time
-
-	retries        int
-	flags          flagSet
-	expectation    chan *ProducerError
-	sequenceNumber int32
-	producerEpoch  int16
-	hasSequence    bool
-}
-
-const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
-
-func (m *ProducerMessage) byteSize(version int) int {
-	var size int
-	if version >= 2 {
-		size = maximumRecordOverhead
-		for _, h := range m.Headers {
-			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
-		}
-	} else {
-		size = producerMessageOverhead
-	}
-	if m.Key != nil {
-		size += m.Key.Length()
-	}
-	if m.Value != nil {
-		size += m.Value.Length()
-	}
-	return size
-}
-
-func (m *ProducerMessage) clear() {
-	m.flags = 0
-	m.retries = 0
-	m.sequenceNumber = 0
-	m.producerEpoch = 0
-	m.hasSequence = false
-}
-
-// ProducerError is the type of error generated when the producer fails to deliver a message.
-// It contains the original ProducerMessage as well as the actual error value.
-type ProducerError struct {
-	Msg *ProducerMessage
-	Err error
-}
-
-func (pe ProducerError) Error() string {
-	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
-}
-
-func (pe ProducerError) Unwrap() error {
-	return pe.Err
-}
-
-// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
-// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
-// when closing a producer.
-type ProducerErrors []*ProducerError
-
-func (pe ProducerErrors) Error() string {
-	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
-}
-
-func (p *asyncProducer) Errors() <-chan *ProducerError {
-	return p.errors
-}
-
-func (p *asyncProducer) Successes() <-chan *ProducerMessage {
-	return p.successes
-}
-
-func (p *asyncProducer) Input() chan<- *ProducerMessage {
-	return p.input
-}
-
-func (p *asyncProducer) Close() error {
-	p.AsyncClose()
-
-	if p.conf.Producer.Return.Successes {
-		go withRecover(func() {
-			for range p.successes {
-			}
-		})
-	}
-
-	var errors ProducerErrors
-	if p.conf.Producer.Return.Errors {
-		for event := range p.errors {
-			errors = append(errors, event)
-		}
-	} else {
-		<-p.errors
-	}
-
-	if len(errors) > 0 {
-		return errors
-	}
-	return nil
-}
-
-func (p *asyncProducer) AsyncClose() {
-	go withRecover(p.shutdown)
-}
-
-// singleton
-// dispatches messages by topic
-func (p *asyncProducer) dispatcher() {
-	handlers := make(map[string]chan<- *ProducerMessage)
-	shuttingDown := false
-
-	for msg := range p.input {
-		if msg == nil {
-			Logger.Println("Something tried to send a nil message, it was ignored.")
-			continue
-		}
-
-		if msg.flags&shutdown != 0 {
-			shuttingDown = true
-			p.inFlight.Done()
-			continue
-		} else if msg.retries == 0 {
-			if shuttingDown {
-				// we can't just call returnError here because that decrements the wait group,
-				// which hasn't been incremented yet for this message, and shouldn't be
-				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
-				if p.conf.Producer.Return.Errors {
-					p.errors <- pErr
-				} else {
-					Logger.Println(pErr)
-				}
-				continue
-			}
-			p.inFlight.Add(1)
-		}
-
-		for _, interceptor := range p.conf.Producer.Interceptors {
-			msg.safelyApplyInterceptor(interceptor)
-		}
-
-		version := 1
-		if p.conf.Version.IsAtLeast(V0_11_0_0) {
-			version = 2
-		} else if msg.Headers != nil {
-			p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
-			continue
-		}
-		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
-			p.returnError(msg, ErrMessageSizeTooLarge)
-			continue
-		}
-
-		handler := handlers[msg.Topic]
-		if handler == nil {
-			handler = p.newTopicProducer(msg.Topic)
-			handlers[msg.Topic] = handler
-		}
-
-		handler <- msg
-	}
-
-	for _, handler := range handlers {
-		close(handler)
-	}
-}
-
-// one per topic
-// partitions messages, then dispatches them by partition
-type topicProducer struct {
-	parent *asyncProducer
-	topic  string
-	input  <-chan *ProducerMessage
-
-	breaker     *breaker.Breaker
-	handlers    map[int32]chan<- *ProducerMessage
-	partitioner Partitioner
-}
-
-func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
-	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
-	tp := &topicProducer{
-		parent:      p,
-		topic:       topic,
-		input:       input,
-		breaker:     breaker.New(3, 1, 10*time.Second),
-		handlers:    make(map[int32]chan<- *ProducerMessage),
-		partitioner: p.conf.Producer.Partitioner(topic),
-	}
-	go withRecover(tp.dispatch)
-	return input
-}
-
-func (tp *topicProducer) dispatch() {
-	for msg := range tp.input {
-		if msg.retries == 0 {
-			if err := tp.partitionMessage(msg); err != nil {
-				tp.parent.returnError(msg, err)
-				continue
-			}
-		}
-
-		handler := tp.handlers[msg.Partition]
-		if handler == nil {
-			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
-			tp.handlers[msg.Partition] = handler
-		}
-
-		handler <- msg
-	}
-
-	for _, handler := range tp.handlers {
-		close(handler)
-	}
-}
-
-func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
-	var partitions []int32
-
-	err := tp.breaker.Run(func() (err error) {
-		requiresConsistency := false
-		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
-			requiresConsistency = ep.MessageRequiresConsistency(msg)
-		} else {
-			requiresConsistency = tp.partitioner.RequiresConsistency()
-		}
-
-		if requiresConsistency {
-			partitions, err = tp.parent.client.Partitions(msg.Topic)
-		} else {
-			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
-		}
-		return
-	})
-	if err != nil {
-		return err
-	}
-
-	numPartitions := int32(len(partitions))
-
-	if numPartitions == 0 {
-		return ErrLeaderNotAvailable
-	}
-
-	choice, err := tp.partitioner.Partition(msg, numPartitions)
-
-	if err != nil {
-		return err
-	} else if choice < 0 || choice >= numPartitions {
-		return ErrInvalidPartition
-	}
-
-	msg.Partition = partitions[choice]
-
-	return nil
-}
-
-// one per partition per topic
-// dispatches messages to the appropriate broker
-// also responsible for maintaining message order during retries
-type partitionProducer struct {
-	parent    *asyncProducer
-	topic     string
-	partition int32
-	input     <-chan *ProducerMessage
-
-	leader         *Broker
-	breaker        *breaker.Breaker
-	brokerProducer *brokerProducer
-
-	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
-	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
-	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
-	// therefore whether our buffer is complete and safe to flush).
-	highWatermark int
-	retryState    []partitionRetryState
-}
-
-type partitionRetryState struct {
-	buf          []*ProducerMessage
-	expectChaser bool
-}
-
-func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
-	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
-	pp := &partitionProducer{
-		parent:    p,
-		topic:     topic,
-		partition: partition,
-		input:     input,
-
-		breaker:    breaker.New(3, 1, 10*time.Second),
-		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
-	}
-	go withRecover(pp.dispatch)
-	return input
-}
-
-func (pp *partitionProducer) backoff(retries int) {
-	var backoff time.Duration
-	if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
-		maxRetries := pp.parent.conf.Producer.Retry.Max
-		backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
-	} else {
-		backoff = pp.parent.conf.Producer.Retry.Backoff
-	}
-	if backoff > 0 {
-		time.Sleep(backoff)
-	}
-}
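
backoff above prefers Producer.Retry.BackoffFunc when it is set. A sketch of plugging in a capped exponential backoff; the base delay and cap are arbitrary example values:

package main

import (
	"time"

	"github.com/IBM/sarama"
)

// withExponentialBackoff installs a custom BackoffFunc, the hook consulted by the
// partition producer before each retry level.
func withExponentialBackoff(conf *sarama.Config) {
	conf.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		if retries > 6 {
			retries = 6 // cap the exponent so the delay tops out around 6.4s
		}
		return (100 * time.Millisecond) << uint(retries) // 100ms, 200ms, 400ms, ...
	}
}
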
-
-func (pp *partitionProducer) dispatch() {
-	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
-	// on the first message
-	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
-	if pp.leader != nil {
-		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
-		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
-		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
-	}
-
-	defer func() {
-		if pp.brokerProducer != nil {
-			pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
-		}
-	}()
-
-	for msg := range pp.input {
-		if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
-			select {
-			case <-pp.brokerProducer.abandoned:
-				// a message on the abandoned channel means that our current broker selection is out of date
-				Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
-				pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
-				pp.brokerProducer = nil
-				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
-			default:
-				// producer connection is still open.
-			}
-		}
-
-		if msg.retries > pp.highWatermark {
-			// a new, higher, retry level; handle it and then back off
-			pp.newHighWatermark(msg.retries)
-			pp.backoff(msg.retries)
-		} else if pp.highWatermark > 0 {
-			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
-			if msg.retries < pp.highWatermark {
-				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
-				if msg.flags&fin == fin {
-					pp.retryState[msg.retries].expectChaser = false
-					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
-				} else {
-					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
-				}
-				continue
-			} else if msg.flags&fin == fin {
-				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
-				// meaning this retry level is done and we can go down (at least) one level and flush that
-				pp.retryState[pp.highWatermark].expectChaser = false
-				pp.flushRetryBuffers()
-				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
-				continue
-			}
-		}
-
-		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
-		// without breaking any of our ordering guarantees
-
-		if pp.brokerProducer == nil {
-			if err := pp.updateLeader(); err != nil {
-				pp.parent.returnError(msg, err)
-				pp.backoff(msg.retries)
-				continue
-			}
-			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
-		}
-
-		// Now that we know we have a broker to actually try and send this message to, generate the sequence
-		// number for it.
-		// All messages being retried (sent or not) have already had their retry count updated
-		// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
-		if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
-			msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
-			msg.hasSequence = true
-		}
-
-		pp.brokerProducer.input <- msg
-	}
-}
-
-func (pp *partitionProducer) newHighWatermark(hwm int) {
-	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
-	pp.highWatermark = hwm
-
-	// send off a fin so that we know when everything "in between" has made it
-	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
-	pp.retryState[pp.highWatermark].expectChaser = true
-	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
-	pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
-
-	// a new HWM means that our current broker selection is out of date
-	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
-	pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
-	pp.brokerProducer = nil
-}
-
-func (pp *partitionProducer) flushRetryBuffers() {
-	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
-	for {
-		pp.highWatermark--
-
-		if pp.brokerProducer == nil {
-			if err := pp.updateLeader(); err != nil {
-				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
-				goto flushDone
-			}
-			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
-		}
-
-		for _, msg := range pp.retryState[pp.highWatermark].buf {
-			pp.brokerProducer.input <- msg
-		}
-
-	flushDone:
-		pp.retryState[pp.highWatermark].buf = nil
-		if pp.retryState[pp.highWatermark].expectChaser {
-			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
-			break
-		} else if pp.highWatermark == 0 {
-			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
-			break
-		}
-	}
-}
-
-func (pp *partitionProducer) updateLeader() error {
-	return pp.breaker.Run(func() (err error) {
-		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
-			return err
-		}
-
-		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
-			return err
-		}
-
-		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
-		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
-		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
-
-		return nil
-	})
-}
-
-// one per broker; also constructs an associated flusher
-func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
-	var (
-		input     = make(chan *ProducerMessage)
-		bridge    = make(chan *produceSet)
-		responses = make(chan *brokerProducerResponse)
-	)
-
-	bp := &brokerProducer{
-		parent:         p,
-		broker:         broker,
-		input:          input,
-		output:         bridge,
-		responses:      responses,
-		stopchan:       make(chan struct{}),
-		buffer:         newProduceSet(p),
-		currentRetries: make(map[string]map[int32]error),
-	}
-	go withRecover(bp.run)
-
-	// minimal bridge to make the network response `select`able
-	go withRecover(func() {
-		for set := range bridge {
-			request := set.buildRequest()
-
-			response, err := broker.Produce(request)
-
-			responses <- &brokerProducerResponse{
-				set: set,
-				err: err,
-				res: response,
-			}
-		}
-		close(responses)
-	})
-
-	if p.conf.Producer.Retry.Max <= 0 {
-		bp.abandoned = make(chan struct{})
-	}
-
-	return bp
-}
-
-type brokerProducerResponse struct {
-	set *produceSet
-	err error
-	res *ProduceResponse
-}
-
-// groups messages together into appropriately-sized batches for sending to the broker
-// handles state related to retries etc
-type brokerProducer struct {
-	parent *asyncProducer
-	broker *Broker
-
-	input     chan *ProducerMessage
-	output    chan<- *produceSet
-	responses <-chan *brokerProducerResponse
-	abandoned chan struct{}
-	stopchan  chan struct{}
-
-	buffer     *produceSet
-	timer      <-chan time.Time
-	timerFired bool
-
-	closing        error
-	currentRetries map[string]map[int32]error
-}
-
-func (bp *brokerProducer) run() {
-	var output chan<- *produceSet
-	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
-
-	for {
-		select {
-		case msg, ok := <-bp.input:
-			if !ok {
-				Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
-				bp.shutdown()
-				return
-			}
-
-			if msg == nil {
-				continue
-			}
-
-			if msg.flags&syn == syn {
-				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
-					bp.broker.ID(), msg.Topic, msg.Partition)
-				if bp.currentRetries[msg.Topic] == nil {
-					bp.currentRetries[msg.Topic] = make(map[int32]error)
-				}
-				bp.currentRetries[msg.Topic][msg.Partition] = nil
-				bp.parent.inFlight.Done()
-				continue
-			}
-
-			if reason := bp.needsRetry(msg); reason != nil {
-				bp.parent.retryMessage(msg, reason)
-
-				if bp.closing == nil && msg.flags&fin == fin {
-					// we were retrying this partition but we can start processing again
-					delete(bp.currentRetries[msg.Topic], msg.Partition)
-					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
-						bp.broker.ID(), msg.Topic, msg.Partition)
-				}
-
-				continue
-			}
-
-			if bp.buffer.wouldOverflow(msg) {
-				Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
-				if err := bp.waitForSpace(msg, false); err != nil {
-					bp.parent.retryMessage(msg, err)
-					continue
-				}
-			}
-
-			if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
-				// The epoch was reset, need to roll the buffer over
-				Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
-				if err := bp.waitForSpace(msg, true); err != nil {
-					bp.parent.retryMessage(msg, err)
-					continue
-				}
-			}
-			if err := bp.buffer.add(msg); err != nil {
-				bp.parent.returnError(msg, err)
-				continue
-			}
-
-			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
-				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
-			}
-		case <-bp.timer:
-			bp.timerFired = true
-		case output <- bp.buffer:
-			bp.rollOver()
-		case response, ok := <-bp.responses:
-			if ok {
-				bp.handleResponse(response)
-			}
-		case <-bp.stopchan:
-			Logger.Printf(
-				"producer/broker/%d run loop asked to stop\n", bp.broker.ID())
-			return
-		}
-
-		if bp.timerFired || bp.buffer.readyToFlush() {
-			output = bp.output
-		} else {
-			output = nil
-		}
-	}
-}
-
-func (bp *brokerProducer) shutdown() {
-	for !bp.buffer.empty() {
-		select {
-		case response := <-bp.responses:
-			bp.handleResponse(response)
-		case bp.output <- bp.buffer:
-			bp.rollOver()
-		}
-	}
-	close(bp.output)
-	for response := range bp.responses {
-		bp.handleResponse(response)
-	}
-	close(bp.stopchan)
-	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
-}
-
-func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
-	if bp.closing != nil {
-		return bp.closing
-	}
-
-	return bp.currentRetries[msg.Topic][msg.Partition]
-}
-
-func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
-	for {
-		select {
-		case response := <-bp.responses:
-			bp.handleResponse(response)
-			// handling a response can change our state, so re-check some things
-			if reason := bp.needsRetry(msg); reason != nil {
-				return reason
-			} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
-				return nil
-			}
-		case bp.output <- bp.buffer:
-			bp.rollOver()
-			return nil
-		}
-	}
-}
-
-func (bp *brokerProducer) rollOver() {
-	bp.timer = nil
-	bp.timerFired = false
-	bp.buffer = newProduceSet(bp.parent)
-}
-
-func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
-	if response.err != nil {
-		bp.handleError(response.set, response.err)
-	} else {
-		bp.handleSuccess(response.set, response.res)
-	}
-
-	if bp.buffer.empty() {
-		bp.rollOver() // this can happen if the response invalidated our buffer
-	}
-}
-
-func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
-	// we iterate through the blocks in the request set, not the response, so that we notice
-	// if the response is missing a block completely
-	var retryTopics []string
-	sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-		if response == nil {
-			// this only happens when RequiredAcks is NoResponse, so we have to assume success
-			bp.parent.returnSuccesses(pSet.msgs)
-			return
-		}
-
-		block := response.GetBlock(topic, partition)
-		if block == nil {
-			bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
-			return
-		}
-
-		switch block.Err {
-		// Success
-		case ErrNoError:
-			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
-				for _, msg := range pSet.msgs {
-					msg.Timestamp = block.Timestamp
-				}
-			}
-			for i, msg := range pSet.msgs {
-				msg.Offset = block.Offset + int64(i)
-			}
-			bp.parent.returnSuccesses(pSet.msgs)
-		// Duplicate
-		case ErrDuplicateSequenceNumber:
-			bp.parent.returnSuccesses(pSet.msgs)
-		// Retriable errors
-		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
-			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
-			if bp.parent.conf.Producer.Retry.Max <= 0 {
-				bp.parent.abandonBrokerConnection(bp.broker)
-				bp.parent.returnErrors(pSet.msgs, block.Err)
-			} else {
-				retryTopics = append(retryTopics, topic)
-			}
-		// Other non-retriable errors
-		default:
-			if bp.parent.conf.Producer.Retry.Max <= 0 {
-				bp.parent.abandonBrokerConnection(bp.broker)
-			}
-			bp.parent.returnErrors(pSet.msgs, block.Err)
-		}
-	})
-
-	if len(retryTopics) > 0 {
-		if bp.parent.conf.Producer.Idempotent {
-			err := bp.parent.client.RefreshMetadata(retryTopics...)
-			if err != nil {
-				Logger.Printf("Failed refreshing metadata because of %v\n", err)
-			}
-		}
-
-		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-			block := response.GetBlock(topic, partition)
-			if block == nil {
-				// handled in the previous "eachPartition" loop
-				return
-			}
-
-			switch block.Err {
-			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
-				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
-				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
-					bp.broker.ID(), topic, partition, block.Err)
-				if bp.currentRetries[topic] == nil {
-					bp.currentRetries[topic] = make(map[int32]error)
-				}
-				bp.currentRetries[topic][partition] = block.Err
-				if bp.parent.conf.Producer.Idempotent {
-					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
-				} else {
-					bp.parent.retryMessages(pSet.msgs, block.Err)
-				}
-				// dropping the following messages has the side effect of incrementing their retry count
-				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
-			}
-		})
-	}
-}
-
-func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
-	Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
-	produceSet := newProduceSet(p)
-	produceSet.msgs[topic] = make(map[int32]*partitionSet)
-	produceSet.msgs[topic][partition] = pSet
-	produceSet.bufferBytes += pSet.bufferBytes
-	produceSet.bufferCount += len(pSet.msgs)
-	for _, msg := range pSet.msgs {
-		if msg.retries >= p.conf.Producer.Retry.Max {
-			p.returnError(msg, kerr)
-			return
-		}
-		msg.retries++
-	}
-
-	// it's expected that a metadata refresh has been requested prior to calling retryBatch
-	leader, err := p.client.Leader(topic, partition)
-	if err != nil {
-		Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up the new leader\n", topic, partition, err)
-		for _, msg := range pSet.msgs {
-			p.returnError(msg, kerr)
-		}
-		return
-	}
-	bp := p.getBrokerProducer(leader)
-	bp.output <- produceSet
-}
-
-func (bp *brokerProducer) handleError(sent *produceSet, err error) {
-	switch err.(type) {
-	case PacketEncodingError:
-		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-			bp.parent.returnErrors(pSet.msgs, err)
-		})
-	default:
-		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
-		bp.parent.abandonBrokerConnection(bp.broker)
-		_ = bp.broker.Close()
-		bp.closing = err
-		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-			bp.parent.retryMessages(pSet.msgs, err)
-		})
-		bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-			bp.parent.retryMessages(pSet.msgs, err)
-		})
-		bp.rollOver()
-	}
-}
-
-// singleton
-// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
-// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
-func (p *asyncProducer) retryHandler() {
-	var msg *ProducerMessage
-	buf := queue.New()
-
-	for {
-		if buf.Length() == 0 {
-			msg = <-p.retries
-		} else {
-			select {
-			case msg = <-p.retries:
-			case p.input <- buf.Peek().(*ProducerMessage):
-				buf.Remove()
-				continue
-			}
-		}
-
-		if msg == nil {
-			return
-		}
-
-		buf.Add(msg)
-	}
-}
-
-// utility functions
-
-func (p *asyncProducer) shutdown() {
-	Logger.Println("Producer shutting down.")
-	p.inFlight.Add(1)
-	p.input <- &ProducerMessage{flags: shutdown}
-
-	p.inFlight.Wait()
-
-	err := p.client.Close()
-	if err != nil {
-		Logger.Println("producer/shutdown failed to close the embedded client:", err)
-	}
-
-	close(p.input)
-	close(p.retries)
-	close(p.errors)
-	close(p.successes)
-}
-
-func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
-	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
-	// will never see a message with this number, so we can never continue the sequence.
-	if msg.hasSequence {
-		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
-		p.txnmgr.bumpEpoch()
-	}
-	msg.clear()
-	pErr := &ProducerError{Msg: msg, Err: err}
-	if p.conf.Producer.Return.Errors {
-		p.errors <- pErr
-	} else {
-		Logger.Println(pErr)
-	}
-	p.inFlight.Done()
-}
-
-func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
-	for _, msg := range batch {
-		p.returnError(msg, err)
-	}
-}
-
-func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
-	for _, msg := range batch {
-		if p.conf.Producer.Return.Successes {
-			msg.clear()
-			p.successes <- msg
-		}
-		p.inFlight.Done()
-	}
-}
-
-func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
-	if msg.retries >= p.conf.Producer.Retry.Max {
-		p.returnError(msg, err)
-	} else {
-		msg.retries++
-		p.retries <- msg
-	}
-}
-
-func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
-	for _, msg := range batch {
-		p.retryMessage(msg, err)
-	}
-}
-
-func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
-	p.brokerLock.Lock()
-	defer p.brokerLock.Unlock()
-
-	bp := p.brokers[broker]
-
-	if bp == nil {
-		bp = p.newBrokerProducer(broker)
-		p.brokers[broker] = bp
-		p.brokerRefs[bp] = 0
-	}
-
-	p.brokerRefs[bp]++
-
-	return bp
-}
-
-func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
-	p.brokerLock.Lock()
-	defer p.brokerLock.Unlock()
-
-	p.brokerRefs[bp]--
-	if p.brokerRefs[bp] == 0 {
-		close(bp.input)
-		delete(p.brokerRefs, bp)
-
-		if p.brokers[broker] == bp {
-			delete(p.brokers, broker)
-		}
-	}
-}
-
-func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
-	p.brokerLock.Lock()
-	defer p.brokerLock.Unlock()
-
-	bc, ok := p.brokers[broker]
-	if ok && bc.abandoned != nil {
-		close(bc.abandoned)
-	}
-
-	delete(p.brokers, broker)
-}
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
deleted file mode 100644
index 9855bf4..0000000
--- a/vendor/github.com/Shopify/sarama/balance_strategy.go
+++ /dev/null
@@ -1,1136 +0,0 @@
-package sarama
-
-import (
-	"container/heap"
-	"errors"
-	"fmt"
-	"math"
-	"sort"
-	"strings"
-)
-
-const (
-	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
-	RangeBalanceStrategyName = "range"
-
-	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
-	RoundRobinBalanceStrategyName = "roundrobin"
-
-	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
-	StickyBalanceStrategyName = "sticky"
-
-	defaultGeneration = -1
-)
-
-// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
-// It contains an allocation of topic/partitions by memberID in the form of
-// a `memberID -> topic -> partitions` map.
-type BalanceStrategyPlan map[string]map[string][]int32
-
-// Add assigns a topic with a number of partitions to a member.
-func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
-	if len(partitions) == 0 {
-		return
-	}
-	if _, ok := p[memberID]; !ok {
-		p[memberID] = make(map[string][]int32, 1)
-	}
-	p[memberID][topic] = append(p[memberID][topic], partitions...)
-}
-
-// --------------------------------------------------------------------
-
-// BalanceStrategy is used to balance topics and partitions
-// across members of a consumer group
-type BalanceStrategy interface {
-	// Name uniquely identifies the strategy.
-	Name() string
-
-	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
-	// and returns a distribution plan.
-	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
-
-	// AssignmentData returns the serialized assignment data for the specified
-	// memberID
-	AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
-}
-
-// --------------------------------------------------------------------
-
-// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
-// Example with one topic T with six partitions (0..5) and two members (M1, M2):
-//   M1: {T: [0, 1, 2]}
-//   M2: {T: [3, 4, 5]}
-var BalanceStrategyRange = &balanceStrategy{
-	name: RangeBalanceStrategyName,
-	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
-		step := float64(len(partitions)) / float64(len(memberIDs))
-
-		for i, memberID := range memberIDs {
-			pos := float64(i)
-			min := int(math.Floor(pos*step + 0.5))
-			max := int(math.Floor((pos+1)*step + 0.5))
-			plan.Add(memberID, topic, partitions[min:max]...)
-		}
-	},
-}
-
-// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
-// while maintaining a balanced partition distribution.
-// Example with topic T with six partitions (0..5) and two members (M1, M2):
-//   M1: {T: [0, 2, 4]}
-//   M2: {T: [1, 3, 5]}
-//
-// On reassignment with an additional consumer, you might get an assignment plan like:
-//   M1: {T: [0, 2]}
-//   M2: {T: [1, 3]}
-//   M3: {T: [4, 5]}
-//
-var BalanceStrategySticky = &stickyBalanceStrategy{}
-
-// --------------------------------------------------------------------
-
-type balanceStrategy struct {
-	name   string
-	coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
-}
-
-// Name implements BalanceStrategy.
-func (s *balanceStrategy) Name() string { return s.name }
-
-// Plan implements BalanceStrategy.
-func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
-	// Build members by topic map
-	mbt := make(map[string][]string)
-	for memberID, meta := range members {
-		for _, topic := range meta.Topics {
-			mbt[topic] = append(mbt[topic], memberID)
-		}
-	}
-
-	// Sort members for each topic
-	for topic, memberIDs := range mbt {
-		sort.Sort(&balanceStrategySortable{
-			topic:     topic,
-			memberIDs: memberIDs,
-		})
-	}
-
-	// Assemble plan
-	plan := make(BalanceStrategyPlan, len(members))
-	for topic, memberIDs := range mbt {
-		s.coreFn(plan, memberIDs, topic, topics[topic])
-	}
-	return plan, nil
-}
-
-// AssignmentData returns nil; simple strategies do not require any shared assignment data.
-func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
-	return nil, nil
-}
-
-type balanceStrategySortable struct {
-	topic     string
-	memberIDs []string
-}
-
-func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
-func (p balanceStrategySortable) Swap(i, j int) {
-	p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
-}
-
-func (p balanceStrategySortable) Less(i, j int) bool {
-	return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
-}
-
-func balanceStrategyHashValue(vv ...string) uint32 {
-	h := uint32(2166136261)
-	for _, s := range vv {
-		for _, c := range s {
-			h ^= uint32(c)
-			h *= 16777619
-		}
-	}
-	return h
-}
-
-type stickyBalanceStrategy struct {
-	movements partitionMovements
-}
-
-// Name implements BalanceStrategy.
-func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
-
-// Plan implements BalanceStrategy.
-func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
-	// track partition movements during generation of the partition assignment plan
-	s.movements = partitionMovements{
-		Movements:                 make(map[topicPartitionAssignment]consumerPair),
-		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
-	}
-
-	// prepopulate the current assignment state from userdata on the consumer group members
-	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
-	if err != nil {
-		return nil, err
-	}
-
-	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
-	isFreshAssignment := false
-	if len(currentAssignment) == 0 {
-		isFreshAssignment = true
-	}
-
-	// create a mapping of all current topic partitions and the consumers that can be assigned to them
-	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
-	for topic, partitions := range topics {
-		for _, partition := range partitions {
-			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
-		}
-	}
-
-	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
-	// also, populate the mapping of partitions to potential consumers
-	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
-	for memberID, meta := range members {
-		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
-		for _, topicSubscription := range meta.Topics {
-			// only evaluate topic subscriptions that are present in the supplied topics map
-			if _, found := topics[topicSubscription]; found {
-				for _, partition := range topics[topicSubscription] {
-					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
-					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
-					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
-				}
-			}
-		}
-
-		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
-		if _, exists := currentAssignment[memberID]; !exists {
-			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
-		}
-	}
-
-	// create a mapping of each partition to its current consumer, where possible
-	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
-	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
-	for partition := range partition2AllPotentialConsumers {
-		unvisitedPartitions[partition] = true
-	}
-	var unassignedPartitions []topicPartitionAssignment
-	for memberID, partitions := range currentAssignment {
-		var keepPartitions []topicPartitionAssignment
-		for _, partition := range partitions {
-			// If this partition no longer exists at all, likely due to the
-			// topic being deleted, we remove the partition from the member.
-			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
-				continue
-			}
-			delete(unvisitedPartitions, partition)
-			currentPartitionConsumers[partition] = memberID
-
-			if !strsContains(members[memberID].Topics, partition.Topic) {
-				unassignedPartitions = append(unassignedPartitions, partition)
-				continue
-			}
-			keepPartitions = append(keepPartitions, partition)
-		}
-		currentAssignment[memberID] = keepPartitions
-	}
-	for unvisited := range unvisitedPartitions {
-		unassignedPartitions = append(unassignedPartitions, unvisited)
-	}
-
-	// sort the topic partitions in order of priority for reassignment
-	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
-
-	// at this point we have preserved all valid topic partition to consumer assignments and removed
-	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
-	// to consumers so that the topic partition assignments are as balanced as possible.
-
-	// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
-	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
-	s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
-
-	// Assemble plan
-	plan := make(BalanceStrategyPlan, len(currentAssignment))
-	for memberID, assignments := range currentAssignment {
-		if len(assignments) == 0 {
-			plan[memberID] = make(map[string][]int32)
-		} else {
-			for _, assignment := range assignments {
-				plan.Add(memberID, assignment.Topic, assignment.Partition)
-			}
-		}
-	}
-	return plan, nil
-}
-
-// AssignmentData serializes the set of topics currently assigned to the
-// specified member as part of the supplied balance plan
-func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
-	return encode(&StickyAssignorUserDataV1{
-		Topics:     topics,
-		Generation: generationID,
-	}, nil)
-}
-
-func strsContains(s []string, value string) bool {
-	for _, entry := range s {
-		if entry == value {
-			return true
-		}
-	}
-	return false
-}
-
-// Balance assignments across consumers for maximum fairness and stickiness.
-func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
-	initializing := false
-	if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 {
-		initializing = true
-	}
-
-	// assign all unassigned partitions
-	for _, partition := range unassignedPartitions {
-		// skip if there is no potential consumer for the partition
-		if len(partition2AllPotentialConsumers[partition]) == 0 {
-			continue
-		}
-		sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
-	}
-
-	// narrow down the reassignment scope to only those partitions that can actually be reassigned
-	for partition := range partition2AllPotentialConsumers {
-		if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
-			sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
-		}
-	}
-
-	// narrow down the reassignment scope to only those consumers that are subject to reassignment
-	fixedAssignments := make(map[string][]topicPartitionAssignment)
-	for memberID := range consumer2AllPotentialPartitions {
-		if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
-			fixedAssignments[memberID] = currentAssignment[memberID]
-			delete(currentAssignment, memberID)
-			sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
-		}
-	}
-
-	// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
-	preBalanceAssignment := deepCopyAssignment(currentAssignment)
-	preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
-	for k, v := range currentPartitionConsumer {
-		preBalancePartitionConsumers[k] = v
-	}
-
-	reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
-
-	// if we are not preserving existing assignments and we have made changes to the current assignment
-	// make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
-	if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
-		currentAssignment = deepCopyAssignment(preBalanceAssignment)
-		currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
-		for k, v := range preBalancePartitionConsumers {
-			currentPartitionConsumer[k] = v
-		}
-	}
-
-	// add the fixed assignments (those that could not change) back
-	for consumer, assignments := range fixedAssignments {
-		currentAssignment[consumer] = assignments
-	}
-}
-
-// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
-// For example, there are two topics (t0, t1) and two consumers (m0, m1), and each topic has three partitions (p0, p1, p2):
-// M0: [t0p0, t0p2, t1p1]
-// M1: [t0p1, t1p0, t1p2]
-var BalanceStrategyRoundRobin = new(roundRobinBalancer)
-
-type roundRobinBalancer struct{}
-
-func (b *roundRobinBalancer) Name() string {
-	return RoundRobinBalanceStrategyName
-}
-
-func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
-	if len(memberAndMetadata) == 0 || len(topics) == 0 {
-		return nil, errors.New("members and topics are not provided")
-	}
-	// sort partitions
-	var topicPartitions []topicAndPartition
-	for topic, partitions := range topics {
-		for _, partition := range partitions {
-			topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition})
-		}
-	}
-	sort.SliceStable(topicPartitions, func(i, j int) bool {
-		pi := topicPartitions[i]
-		pj := topicPartitions[j]
-		return pi.comparedValue() < pj.comparedValue()
-	})
-
-	// sort members
-	var members []memberAndTopic
-	for memberID, meta := range memberAndMetadata {
-		m := memberAndTopic{
-			memberID: memberID,
-			topics:   make(map[string]struct{}),
-		}
-		for _, t := range meta.Topics {
-			m.topics[t] = struct{}{}
-		}
-		members = append(members, m)
-	}
-	sort.SliceStable(members, func(i, j int) bool {
-		mi := members[i]
-		mj := members[j]
-		return mi.memberID < mj.memberID
-	})
-
-	// assign partitions
-	plan := make(BalanceStrategyPlan, len(members))
-	i := 0
-	n := len(members)
-	for _, tp := range topicPartitions {
-		m := members[i%n]
-		for !m.hasTopic(tp.topic) {
-			i++
-			m = members[i%n]
-		}
-		plan.Add(m.memberID, tp.topic, tp.partition)
-		i++
-	}
-	return plan, nil
-}
-
-func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
-	return nil, nil // do nothing for now
-}
-
-type topicAndPartition struct {
-	topic     string
-	partition int32
-}
-
-func (tp *topicAndPartition) comparedValue() string {
-	return fmt.Sprintf("%s-%d", tp.topic, tp.partition)
-}
-
-type memberAndTopic struct {
-	memberID string
-	topics   map[string]struct{}
-}
-
-func (m *memberAndTopic) hasTopic(topic string) bool {
-	_, isExist := m.topics[topic]
-	return isExist
-}
-
-// Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs.
-// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
-// Lower balance score indicates a more balanced assignment.
-func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
-	consumer2AssignmentSize := make(map[string]int, len(assignment))
-	for memberID, partitions := range assignment {
-		consumer2AssignmentSize[memberID] = len(partitions)
-	}
-
-	var score float64
-	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
-		delete(consumer2AssignmentSize, memberID)
-		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
-			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
-		}
-	}
-	return int(score)
-}
-
-// Determine whether the current assignment plan is balanced.
-func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
-	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
-	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
-	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
-	if min >= max-1 {
-		// if the minimum and maximum numbers of partitions assigned to consumers differ by at most one, return true
-		return true
-	}
-
-	// create a mapping from partitions to the consumer assigned to them
-	allPartitions := make(map[topicPartitionAssignment]string)
-	for memberID, partitions := range currentAssignment {
-		for _, partition := range partitions {
-			if _, exists := allPartitions[partition]; exists {
-				Logger.Printf("Topic %s Partition %d is assigned to more than one consumer", partition.Topic, partition.Partition)
-			}
-			allPartitions[partition] = memberID
-		}
-	}
-
-	// for each consumer that does not have all the topic partitions it can get, make sure none of the topic partitions it
-	// could get but did not can be moved to it (because that would break the balance)
-	for _, memberID := range sortedCurrentSubscriptions {
-		consumerPartitions := currentAssignment[memberID]
-		consumerPartitionCount := len(consumerPartitions)
-
-		// skip if this consumer already has all the topic partitions it can get
-		if consumerPartitionCount == len(allSubscriptions[memberID]) {
-			continue
-		}
-
-		// otherwise make sure it cannot get any more
-		potentialTopicPartitions := allSubscriptions[memberID]
-		for _, partition := range potentialTopicPartitions {
-			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
-				otherConsumer := allPartitions[partition]
-				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
-				if consumerPartitionCount < otherConsumerPartitionCount {
-					return false
-				}
-			}
-		}
-	}
-	return true
-}
-
-// Reassign all topic partitions that need reassignment until balanced.
-func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
-	reassignmentPerformed := false
-	modified := false
-
-	// repeat reassignment until no partition can be moved to improve the balance
-	for {
-		modified = false
-		// reassign all reassignable partitions (starting from the partition with the fewest potential consumers)
-		// until the full list is processed or a balance is achieved
-		for _, partition := range reassignablePartitions {
-			if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
-				break
-			}
-
-			// the partition must have at least two consumers
-			if len(partition2AllPotentialConsumers[partition]) <= 1 {
-				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
-			}
-
-			// the partition must have a consumer
-			consumer := currentPartitionConsumer[partition]
-			if consumer == "" {
-				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
-			}
-
-			if _, exists := prevAssignment[partition]; exists {
-				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
-					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
-					reassignmentPerformed = true
-					modified = true
-					continue
-				}
-			}
-
-			// check if a better-suited consumer exists for the partition; if so, reassign it
-			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
-				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
-					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
-					reassignmentPerformed = true
-					modified = true
-					break
-				}
-			}
-		}
-		if !modified {
-			return reassignmentPerformed
-		}
-	}
-}
-
-// Identify a new consumer for a topic partition and reassign it.
-func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
-	for _, anotherConsumer := range sortedCurrentSubscriptions {
-		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
-			return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
-		}
-	}
-	return sortedCurrentSubscriptions
-}
-
-// Reassign a specific partition to a new consumer
-func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
-	consumer := currentPartitionConsumer[partition]
-	// find the correct partition movement considering the stickiness requirement
-	partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
-	return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
-}
-
-// Track the movement of a topic partition after assignment
-func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
-	oldConsumer := currentPartitionConsumer[partition]
-	s.movements.movePartition(partition, oldConsumer, newConsumer)
-
-	currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
-	currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
-	currentPartitionConsumer[partition] = newConsumer
-	return sortMemberIDsByPartitionAssignments(currentAssignment)
-}
-
-// Determine whether a specific consumer should be considered for topic partition assignment.
-func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
-	currentPartitions := currentAssignment[memberID]
-	currentAssignmentSize := len(currentPartitions)
-	maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
-	if currentAssignmentSize > maxAssignmentSize {
-		Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
-	}
-	if currentAssignmentSize < maxAssignmentSize {
-		// if a consumer is not assigned all its potential partitions it is subject to reassignment
-		return true
-	}
-	for _, partition := range currentPartitions {
-		if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
-			return true
-		}
-	}
-	return false
-}
-
-// Only consider reassigning those topic partitions that have two or more potential consumers.
-func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
-	return len(partition2AllPotentialConsumers[partition]) >= 2
-}
-
-// The assignment should improve the overall balance of the partition assignments to consumers.
-func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
-	for _, memberID := range sortedCurrentSubscriptions {
-		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
-			currentAssignment[memberID] = append(currentAssignment[memberID], partition)
-			currentPartitionConsumer[partition] = memberID
-			break
-		}
-	}
-	return sortMemberIDsByPartitionAssignments(currentAssignment)
-}
-
-// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
-func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
-	userDataV1 := &StickyAssignorUserDataV1{}
-	if err := decode(userDataBytes, userDataV1); err != nil {
-		userDataV0 := &StickyAssignorUserDataV0{}
-		if err := decode(userDataBytes, userDataV0); err != nil {
-			return nil, err
-		}
-		return userDataV0, nil
-	}
-	return userDataV1, nil
-}
-
-// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
-// to those topic partitions currently reported by the Kafka cluster.
-func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
-	assignments := deepCopyAssignment(currentAssignment)
-	for memberID, partitions := range assignments {
-		// perform in-place filtering
-		i := 0
-		for _, partition := range partitions {
-			if _, exists := partition2AllPotentialConsumers[partition]; exists {
-				partitions[i] = partition
-				i++
-			}
-		}
-		assignments[memberID] = partitions[:i]
-	}
-	return assignments
-}
-
-func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
-	for i, assignment := range assignments {
-		if assignment == topic {
-			return append(assignments[:i], assignments[i+1:]...)
-		}
-	}
-	return assignments
-}
-
-func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
-	for _, assignment := range assignments {
-		if assignment == topic {
-			return true
-		}
-	}
-	return false
-}
-
-func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
-	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
-	for partition := range partition2AllPotentialConsumers {
-		unassignedPartitions[partition] = true
-	}
-
-	sortedPartitions := make([]topicPartitionAssignment, 0)
-	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
-		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics),
-		// then we simply list partitions in a round-robin fashion (from consumers with
-		// most assigned partitions to those with least)
-		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
-
-		// use priority-queue to evaluate consumer group members in descending-order based on
-		// the number of topic partition assignments (i.e. consumers with most assignments first)
-		pq := make(assignmentPriorityQueue, len(assignments))
-		i := 0
-		for consumerID, consumerAssignments := range assignments {
-			pq[i] = &consumerGroupMember{
-				id:          consumerID,
-				assignments: consumerAssignments,
-			}
-			i++
-		}
-		heap.Init(&pq)
-
-		for {
-			// loop until no consumer-group members remain
-			if pq.Len() == 0 {
-				break
-			}
-			member := pq[0]
-
-			// partitions that were assigned to a different consumer last time
-			var prevPartitionIndex int
-			for i, partition := range member.assignments {
-				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
-					prevPartitionIndex = i
-					break
-				}
-			}
-
-			if len(member.assignments) > 0 {
-				partition := member.assignments[prevPartitionIndex]
-				sortedPartitions = append(sortedPartitions, partition)
-				delete(unassignedPartitions, partition)
-				if prevPartitionIndex == 0 {
-					member.assignments = member.assignments[1:]
-				} else {
-					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
-				}
-				heap.Fix(&pq, 0)
-			} else {
-				heap.Pop(&pq)
-			}
-		}
-
-		for partition := range unassignedPartitions {
-			sortedPartitions = append(sortedPartitions, partition)
-		}
-	} else {
-		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
-		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
-	}
-	return sortedPartitions
-}
-
-func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
-	// sort the members by the number of partition assignments in ascending order
-	sortedMemberIDs := make([]string, 0, len(assignments))
-	for memberID := range assignments {
-		sortedMemberIDs = append(sortedMemberIDs, memberID)
-	}
-	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
-		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
-		if ret == 0 {
-			return sortedMemberIDs[i] < sortedMemberIDs[j]
-		}
-		return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]])
-	})
-	return sortedMemberIDs
-}
-
-func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
-	// sort the partitions by the number of potential consumers in ascending order
-	sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
-	i := 0
-	for partition := range partition2AllPotentialConsumers {
-		sortedPartionIDs[i] = partition
-		i++
-	}
-	sort.Slice(sortedPartionIDs, func(i, j int) bool {
-		if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
-			ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
-			if ret == 0 {
-				return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
-			}
-			return ret < 0
-		}
-		return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
-	})
-	return sortedPartionIDs
-}
-
-func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
-	m := make(map[string][]topicPartitionAssignment, len(assignment))
-	for memberID, subscriptions := range assignment {
-		m[memberID] = append(subscriptions[:0:0], subscriptions...)
-	}
-	return m
-}
-
-func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
-	curMembers := make(map[string]int)
-	for _, cur := range partition2AllPotentialConsumers {
-		if len(curMembers) == 0 {
-			for _, curMembersElem := range cur {
-				curMembers[curMembersElem]++
-			}
-			continue
-		}
-
-		if len(curMembers) != len(cur) {
-			return false
-		}
-
-		yMap := make(map[string]int)
-		for _, yElem := range cur {
-			yMap[yElem]++
-		}
-
-		for curMembersMapKey, curMembersMapVal := range curMembers {
-			if yMap[curMembersMapKey] != curMembersMapVal {
-				return false
-			}
-		}
-	}
-
-	curPartitions := make(map[topicPartitionAssignment]int)
-	for _, cur := range consumer2AllPotentialPartitions {
-		if len(curPartitions) == 0 {
-			for _, curPartitionElem := range cur {
-				curPartitions[curPartitionElem]++
-			}
-			continue
-		}
-
-		if len(curPartitions) != len(cur) {
-			return false
-		}
-
-		yMap := make(map[topicPartitionAssignment]int)
-		for _, yElem := range cur {
-			yMap[yElem]++
-		}
-
-		for curMembersMapKey, curMembersMapVal := range curPartitions {
-			if yMap[curMembersMapKey] != curMembersMapVal {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// We need to process subscriptions' user data with each consumer's reported generation in mind;
-// higher generations overwrite lower generations in case of a conflict.
-// Note that a conflict could exist only if user data is for different generations.
-func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
-	currentAssignment := make(map[string][]topicPartitionAssignment)
-	prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
-
-	// for each partition we create a sorted map of its consumers by generation
-	sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
-	for memberID, meta := range members {
-		consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
-		if err != nil {
-			return nil, nil, err
-		}
-		for _, partition := range consumerUserData.partitions() {
-			if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
-				if consumerUserData.hasGeneration() {
-					if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
-						// same partition is assigned to two consumers during the same rebalance.
-						// log a warning and skip this record
-						Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
-						continue
-					} else {
-						consumers[consumerUserData.generation()] = memberID
-					}
-				} else {
-					consumers[defaultGeneration] = memberID
-				}
-			} else {
-				generation := defaultGeneration
-				if consumerUserData.hasGeneration() {
-					generation = consumerUserData.generation()
-				}
-				sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
-			}
-		}
-	}
-
-	// prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition
-	// current and previous consumers are the last two consumers of each partition in the above sorted map
-	for partition, consumers := range sortedPartitionConsumersByGeneration {
-		// sort consumers by generation in decreasing order
-		var generations []int
-		for generation := range consumers {
-			generations = append(generations, generation)
-		}
-		sort.Sort(sort.Reverse(sort.IntSlice(generations)))
-
-		consumer := consumers[generations[0]]
-		if _, exists := currentAssignment[consumer]; !exists {
-			currentAssignment[consumer] = []topicPartitionAssignment{partition}
-		} else {
-			currentAssignment[consumer] = append(currentAssignment[consumer], partition)
-		}
-
-		// check for previous assignment, if any
-		if len(generations) > 1 {
-			prevAssignment[partition] = consumerGenerationPair{
-				MemberID:   consumers[generations[1]],
-				Generation: generations[1],
-			}
-		}
-	}
-	return currentAssignment, prevAssignment, nil
-}
-
-type consumerGenerationPair struct {
-	MemberID   string
-	Generation int
-}
-
-// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
-type consumerPair struct {
-	SrcMemberID string
-	DstMemberID string
-}
-
-// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
-type partitionMovements struct {
-	PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
-	Movements                 map[topicPartitionAssignment]consumerPair
-}
-
-func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
-	pair := p.Movements[partition]
-	delete(p.Movements, partition)
-
-	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
-	delete(partitionMovementsForThisTopic[pair], partition)
-	if len(partitionMovementsForThisTopic[pair]) == 0 {
-		delete(partitionMovementsForThisTopic, pair)
-	}
-	if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
-		delete(p.PartitionMovementsByTopic, partition.Topic)
-	}
-	return pair
-}
-
-func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
-	p.Movements[partition] = pair
-	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
-		p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
-	}
-	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
-	if _, exists := partitionMovementsForThisTopic[pair]; !exists {
-		partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
-	}
-	partitionMovementsForThisTopic[pair][partition] = true
-}
-
-func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
-	pair := consumerPair{
-		SrcMemberID: oldConsumer,
-		DstMemberID: newConsumer,
-	}
-	if _, exists := p.Movements[partition]; exists {
-		// this partition has previously moved
-		existingPair := p.removeMovementRecordOfPartition(partition)
-		if existingPair.DstMemberID != oldConsumer {
-			Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
-		}
-		if existingPair.SrcMemberID != newConsumer {
-			// the partition is not moving back to its previous consumer
-			p.addPartitionMovementRecord(partition, consumerPair{
-				SrcMemberID: existingPair.SrcMemberID,
-				DstMemberID: newConsumer,
-			})
-		}
-	} else {
-		p.addPartitionMovementRecord(partition, pair)
-	}
-}
-
-func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
-	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
-		return partition
-	}
-	if _, exists := p.Movements[partition]; exists {
-		// this partition has previously moved
-		if oldConsumer != p.Movements[partition].DstMemberID {
-			Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
-		}
-		oldConsumer = p.Movements[partition].SrcMemberID
-	}
-
-	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
-	reversePair := consumerPair{
-		SrcMemberID: newConsumer,
-		DstMemberID: oldConsumer,
-	}
-	if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
-		return partition
-	}
-	var reversePairPartition topicPartitionAssignment
-	for otherPartition := range partitionMovementsForThisTopic[reversePair] {
-		reversePairPartition = otherPartition
-	}
-	return reversePairPartition
-}
-
-func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
-	if src == dst {
-		return currentPath, false
-	}
-	if len(pairs) == 0 {
-		return currentPath, false
-	}
-	for _, pair := range pairs {
-		if src == pair.SrcMemberID && dst == pair.DstMemberID {
-			currentPath = append(currentPath, src, dst)
-			return currentPath, true
-		}
-	}
-
-	for _, pair := range pairs {
-		if pair.SrcMemberID == src {
-			// create a deep copy of the pairs, excluding the current pair
-			reducedSet := make([]consumerPair, len(pairs)-1)
-			i := 0
-			for _, p := range pairs {
-				if p != pair {
-					reducedSet[i] = p
-					i++
-				}
-			}
-
-			currentPath = append(currentPath, pair.SrcMemberID)
-			return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
-		}
-	}
-	return currentPath, false
-}
-
-func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
-	superCycle := make([]string, len(cycle)-1)
-	for i := 0; i < len(cycle)-1; i++ {
-		superCycle[i] = cycle[i]
-	}
-	superCycle = append(superCycle, cycle...)
-	for _, foundCycle := range cycles {
-		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
-			return true
-		}
-	}
-	return false
-}
-
-func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
-	cycles := make([][]string, 0)
-	for _, pair := range pairs {
-		// create a deep copy of the pairs, excluding the current pair
-		reducedPairs := make([]consumerPair, len(pairs)-1)
-		i := 0
-		for _, p := range pairs {
-			if p != pair {
-				reducedPairs[i] = p
-				i++
-			}
-		}
-		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
-			if !p.in(path, cycles) {
-				cycles = append(cycles, path)
-				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
-			}
-		}
-	}
-
-	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
-	// the odds of finding a cycle among more than two consumers seem low enough (according to various randomized
-	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
-	for _, cycle := range cycles {
-		if len(cycle) == 3 {
-			return true
-		}
-	}
-	return false
-}
-
-func (p *partitionMovements) isSticky() bool {
-	for topic, movements := range p.PartitionMovementsByTopic {
-		movementPairs := make([]consumerPair, len(movements))
-		i := 0
-		for pair := range movements {
-			movementPairs[i] = pair
-			i++
-		}
-		if p.hasCycles(movementPairs) {
-			Logger.Printf("Stickiness is violated for topic %s", topic)
-			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
-			return false
-		}
-	}
-	return true
-}
-
-func indexOfSubList(source []string, target []string) int {
-	targetSize := len(target)
-	maxCandidate := len(source) - targetSize
-nextCand:
-	for candidate := 0; candidate <= maxCandidate; candidate++ {
-		j := candidate
-		for i := 0; i < targetSize; i++ {
-			if target[i] != source[j] {
-				// Element mismatch, try next cand
-				continue nextCand
-			}
-			j++
-		}
-		// All elements of candidate matched target
-		return candidate
-	}
-	return -1
-}
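A worked example of how in() above treats rotations as duplicates: for an already-recorded cycle [a b a] and a new path [b a b], the superCycle built from the new path is [b a] + [b a b] = [b a b a b]; indexOfSubList finds [a b a] in it at index 1, so the rotated path is not added to cycles again.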
-
-type consumerGroupMember struct {
-	id          string
-	assignments []topicPartitionAssignment
-}
-
-// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
-// in descending order (most assignments to least assignments).
-type assignmentPriorityQueue []*consumerGroupMember
-
-func (pq assignmentPriorityQueue) Len() int { return len(pq) }
-
-func (pq assignmentPriorityQueue) Less(i, j int) bool {
-	// order assignment priority queue in descending order using assignment-count/member-id
-	if len(pq[i].assignments) == len(pq[j].assignments) {
-		return strings.Compare(pq[i].id, pq[j].id) > 0
-	}
-	return len(pq[i].assignments) > len(pq[j].assignments)
-}
-
-func (pq assignmentPriorityQueue) Swap(i, j int) {
-	pq[i], pq[j] = pq[j], pq[i]
-}
-
-func (pq *assignmentPriorityQueue) Push(x interface{}) {
-	member := x.(*consumerGroupMember)
-	*pq = append(*pq, member)
-}
-
-func (pq *assignmentPriorityQueue) Pop() interface{} {
-	old := *pq
-	n := len(old)
-	member := old[n-1]
-	*pq = old[0 : n-1]
-	return member
-}
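Because Less above orders by descending assignment count, the standard container/heap package treats the most-loaded member as the minimum element. A minimal sketch, assuming it sits in the same package as the types above and that container/heap is imported; the mostLoadedFirst helper is hypothetical:

// mostLoadedFirst returns the members ordered from most to fewest assignments
// by draining a heap built over a copy of the input slice.
func mostLoadedFirst(members []*consumerGroupMember) []*consumerGroupMember {
	pq := assignmentPriorityQueue(append([]*consumerGroupMember(nil), members...))
	heap.Init(&pq) // establish the heap invariant
	ordered := make([]*consumerGroupMember, 0, pq.Len())
	for pq.Len() > 0 {
		ordered = append(ordered, heap.Pop(&pq).(*consumerGroupMember))
	}
	return ordered
}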
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
deleted file mode 100644
index dd01e4e..0000000
--- a/vendor/github.com/Shopify/sarama/broker.go
+++ /dev/null
@@ -1,1475 +0,0 @@
-package sarama
-
-import (
-	"crypto/tls"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"net"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
-type Broker struct {
-	conf *Config
-	rack *string
-
-	id            int32
-	addr          string
-	correlationID int32
-	conn          net.Conn
-	connErr       error
-	lock          sync.Mutex
-	opened        int32
-	responses     chan responsePromise
-	done          chan bool
-
-	registeredMetrics []string
-
-	incomingByteRate       metrics.Meter
-	requestRate            metrics.Meter
-	requestSize            metrics.Histogram
-	requestLatency         metrics.Histogram
-	outgoingByteRate       metrics.Meter
-	responseRate           metrics.Meter
-	responseSize           metrics.Histogram
-	requestsInFlight       metrics.Counter
-	brokerIncomingByteRate metrics.Meter
-	brokerRequestRate      metrics.Meter
-	brokerRequestSize      metrics.Histogram
-	brokerRequestLatency   metrics.Histogram
-	brokerOutgoingByteRate metrics.Meter
-	brokerResponseRate     metrics.Meter
-	brokerResponseSize     metrics.Histogram
-	brokerRequestsInFlight metrics.Counter
-
-	kerberosAuthenticator GSSAPIKerberosAuth
-}
-
-// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
-type SASLMechanism string
-
-const (
-	// SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
-	SASLTypeOAuth = "OAUTHBEARER"
-	// SASLTypePlaintext represents the SASL/PLAIN mechanism
-	SASLTypePlaintext = "PLAIN"
-	// SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
-	SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
-	// SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
-	SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
-	SASLTypeGSSAPI      = "GSSAPI"
-	// SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
-	// server negotiate SASL auth using opaque packets.
-	SASLHandshakeV0 = int16(0)
-	// SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
-	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
-	SASLHandshakeV1 = int16(1)
-	// SASLExtKeyAuth is the reserved extension key name sent as part of the
-	// SASL/OAUTHBEARER initial client response
-	SASLExtKeyAuth = "auth"
-)
-
-// AccessToken contains an access token used to authenticate a
-// SASL/OAUTHBEARER client along with associated metadata.
-type AccessToken struct {
-	// Token is the access token payload.
-	Token string
-	// Extensions is an optional map of arbitrary key-value pairs that can be
-	// sent with the SASL/OAUTHBEARER initial client response. These values are
-	// ignored by the SASL server if they are unexpected. This feature is only
-	// supported by Kafka >= 2.1.0.
-	Extensions map[string]string
-}
-
-// AccessTokenProvider is the interface that encapsulates how implementors
-// can generate access tokens for Kafka broker authentication.
-type AccessTokenProvider interface {
-	// Token returns an access token. The implementation should ensure token
-	// reuse so that multiple calls at connect time do not create multiple
-	// tokens. The implementation should also periodically refresh the token in
-	// order to guarantee that each call returns an unexpired token.  This
-	// method should not block indefinitely--a timeout error should be returned
-	// after a short period of inactivity so that the broker connection logic
-	// can log debugging information and retry.
-	Token() (*AccessToken, error)
-}
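A minimal sketch of an AccessTokenProvider for client code, assuming a pre-issued token; a real provider should cache and refresh tokens as the interface comment requires, and the staticTokenProvider name and token value are hypothetical:

type staticTokenProvider struct{ token string }

// Token returns the fixed token with no refresh or expiry handling.
func (p staticTokenProvider) Token() (*sarama.AccessToken, error) {
	return &sarama.AccessToken{Token: p.token}, nil
}

// Wiring it into the SASL configuration fields used by authenticateViaSASL below:
//	cfg := sarama.NewConfig()
//	cfg.Net.SASL.Enable = true
//	cfg.Net.SASL.Mechanism = sarama.SASLTypeOAuth
//	cfg.Net.SASL.TokenProvider = staticTokenProvider{token: "opaque-token"}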
-
-// SCRAMClient is an interface to a SCRAM
-// client implementation.
-type SCRAMClient interface {
-	// Begin prepares the client for the SCRAM exchange
-	// with the server with a user name and a password
-	Begin(userName, password, authzID string) error
-	// Step steps client through the SCRAM exchange. It is
-	// called repeatedly until it errors or `Done` returns true.
-	Step(challenge string) (response string, err error)
-	// Done should return true when the SCRAM conversation
-	// is over.
-	Done() bool
-}
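A hedged sketch of wiring a SCRAMClient implementation into the configuration fields that sendAndReceiveSASLSCRAMv1 reads later in this file; myXDGSCRAMClient stands in for any type satisfying the interface above, and the credentials are placeholders:

cfg := sarama.NewConfig()
cfg.Net.SASL.Enable = true
cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
cfg.Net.SASL.User = "alice"
cfg.Net.SASL.Password = "secret"
// A fresh SCRAM client is generated per connection; Begin/Step/Done drive the exchange.
cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &myXDGSCRAMClient{} }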
-
-type responsePromise struct {
-	requestTime   time.Time
-	correlationID int32
-	headerVersion int16
-	packets       chan []byte
-	errors        chan error
-}
-
-// NewBroker creates and returns a Broker targeting the given host:port address.
-// This does not attempt to actually connect; you have to call Open() for that.
-func NewBroker(addr string) *Broker {
-	return &Broker{id: -1, addr: addr}
-}
-
-// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
-// waiting for the connection to complete. This means that any subsequent operations on the broker will
-// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
-// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
-// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
-func (b *Broker) Open(conf *Config) error {
-	if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
-		return ErrAlreadyConnected
-	}
-
-	if conf == nil {
-		conf = NewConfig()
-	}
-
-	err := conf.Validate()
-	if err != nil {
-		return err
-	}
-
-	b.lock.Lock()
-
-	go withRecover(func() {
-		defer b.lock.Unlock()
-
-		dialer := conf.getDialer()
-		b.conn, b.connErr = dialer.Dial("tcp", b.addr)
-		if b.connErr != nil {
-			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
-			b.conn = nil
-			atomic.StoreInt32(&b.opened, 0)
-			return
-		}
-		if conf.Net.TLS.Enable {
-			b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config))
-		}
-
-		b.conn = newBufConn(b.conn)
-		b.conf = conf
-
-		// Create or reuse the global metrics shared between brokers
-		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
-		b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
-		b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
-		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
-		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
-		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
-		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
-		b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry)
-		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
-		// the same id (-1) and are already exposed through the global metrics above
-		if b.id >= 0 {
-			b.registerMetrics()
-		}
-
-		if conf.Net.SASL.Enable {
-			b.connErr = b.authenticateViaSASL()
-
-			if b.connErr != nil {
-				err = b.conn.Close()
-				if err == nil {
-					Logger.Printf("Closed connection to broker %s\n", b.addr)
-				} else {
-					Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
-				}
-				b.conn = nil
-				atomic.StoreInt32(&b.opened, 0)
-				return
-			}
-		}
-
-		b.done = make(chan bool)
-		b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
-
-		if b.id >= 0 {
-			Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
-		} else {
-			Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
-		}
-		go withRecover(b.responseReceiver)
-	})
-
-	return nil
-}
-
-// Connected returns true if the broker is connected and false otherwise. If the broker is not
-// connected but it had tried to connect, the error from that connection attempt is also returned.
-func (b *Broker) Connected() (bool, error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	return b.conn != nil, b.connErr
-}
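A minimal usage sketch of the pattern the Open documentation describes, assuming the log package and a hypothetical localhost broker: Open returns immediately, and the follow-up Connected call blocks until the dial has succeeded or failed.

b := sarama.NewBroker("localhost:9092")
if err := b.Open(nil); err != nil { // a nil config falls back to NewConfig()
	log.Fatal(err)
}
if ok, err := b.Connected(); !ok {
	log.Fatalf("broker not connected: %v", err)
}
defer b.Close()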
-
-// Close closes the broker resources
-func (b *Broker) Close() error {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	if b.conn == nil {
-		return ErrNotConnected
-	}
-
-	close(b.responses)
-	<-b.done
-
-	err := b.conn.Close()
-
-	b.conn = nil
-	b.connErr = nil
-	b.done = nil
-	b.responses = nil
-
-	b.unregisterMetrics()
-
-	if err == nil {
-		Logger.Printf("Closed connection to broker %s\n", b.addr)
-	} else {
-		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
-	}
-
-	atomic.StoreInt32(&b.opened, 0)
-
-	return err
-}
-
-// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
-func (b *Broker) ID() int32 {
-	return b.id
-}
-
-// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
-func (b *Broker) Addr() string {
-	return b.addr
-}
-
-// Rack returns the broker's rack as retrieved from Kafka's metadata or the
-// empty string if it is not known.  The returned value corresponds to the
-// broker's broker.rack configuration setting.  Requires protocol version to be
-// at least v0.10.0.0.
-func (b *Broker) Rack() string {
-	if b.rack == nil {
-		return ""
-	}
-	return *b.rack
-}
-
-// GetMetadata sends a metadata request and returns a metadata response or error
-func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
-	response := new(MetadataResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
-func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
-	response := new(ConsumerMetadataResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// FindCoordinator sends a find coordinator request and returns a response or error
-func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
-	response := new(FindCoordinatorResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// GetAvailableOffsets returns an offset response or error
-func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
-	response := new(OffsetResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// Produce returns a produce response or error
-func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
-	var (
-		response *ProduceResponse
-		err      error
-	)
-
-	if request.RequiredAcks == NoResponse {
-		err = b.sendAndReceive(request, nil)
-	} else {
-		response = new(ProduceResponse)
-		err = b.sendAndReceive(request, response)
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// Fetch returns a FetchResponse or error
-func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
-	response := new(FetchResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// CommitOffset returns an offset commit response or error
-func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
-	response := new(OffsetCommitResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// FetchOffset returns an offset fetch response or error
-func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
-	response := new(OffsetFetchResponse)
-	response.Version = request.Version // needed to handle the two header versions
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// JoinGroup returns a join group response or error
-func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
-	response := new(JoinGroupResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// SyncGroup returns a sync group response or error
-func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
-	response := new(SyncGroupResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// LeaveGroup returns a leave group response or error
-func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
-	response := new(LeaveGroupResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// Heartbeat returns a heartbeat response or error
-func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
-	response := new(HeartbeatResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// ListGroups returns a list group response or error
-func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
-	response := new(ListGroupsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeGroups returns a describe groups response or error
-func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
-	response := new(DescribeGroupsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// ApiVersions returns an api versions response or error
-func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
-	response := new(ApiVersionsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// CreateTopics sends a create topic request and returns a create topic response
-func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
-	response := new(CreateTopicsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DeleteTopics sends a delete topic request and returns delete topic response
-func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
-	response := new(DeleteTopicsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// CreatePartitions sends a create partition request and returns a create
-// partitions response or error
-func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
-	response := new(CreatePartitionsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// AlterPartitionReassignments sends an alter partition reassignments request and
-// returns an alter partition reassignments response
-func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
-	response := new(AlterPartitionReassignmentsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// ListPartitionReassignments sends a list partition reassignments request and
-// returns a list partition reassignments response
-func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
-	response := new(ListPartitionReassignmentsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DeleteRecords sends a request to delete records and returns a delete records
-// response or error
-func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
-	response := new(DeleteRecordsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeAcls sends a describe acl request and returns a response or error
-func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
-	response := new(DescribeAclsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// CreateAcls sends a create acl request and returns a response or error
-func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
-	response := new(CreateAclsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DeleteAcls sends a delete acl request and returns a response or error
-func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
-	response := new(DeleteAclsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// InitProducerID sends an init producer request and returns a response or error
-func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
-	response := new(InitProducerIDResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// AddPartitionsToTxn sends a request to add partitions to a txn and returns
-// a response or error
-func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
-	response := new(AddPartitionsToTxnResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// AddOffsetsToTxn sends a request to add offsets to txn and returns a response
-// or error
-func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
-	response := new(AddOffsetsToTxnResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// EndTxn sends a request to end txn and returns a response or error
-func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
-	response := new(EndTxnResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// TxnOffsetCommit sends a request to commit transaction offsets and returns
-// a response or error
-func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
-	response := new(TxnOffsetCommitResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeConfigs sends a request to describe config and returns a response or
-// error
-func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
-	response := new(DescribeConfigsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// AlterConfigs sends a request to alter config and returns a response or error
-func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
-	response := new(AlterConfigsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// IncrementalAlterConfigs sends a request to incrementally alter config and returns a response or error
-func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
-	response := new(IncrementalAlterConfigsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DeleteGroups sends a request to delete groups and returns a response or error
-func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
-	response := new(DeleteGroupsResponse)
-
-	if err := b.sendAndReceive(request, response); err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
-func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
-	response := new(DescribeLogDirsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeUserScramCredentials sends a request to get SCRAM users
-func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
-	res := new(DescribeUserScramCredentialsResponse)
-
-	err := b.sendAndReceive(req, res)
-	if err != nil {
-		return nil, err
-	}
-
-	return res, err
-}
-
-func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
-	res := new(AlterUserScramCredentialsResponse)
-
-	err := b.sendAndReceive(req, res)
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
-
-// readFull ensures the conn ReadDeadline has been set up before making a
-// call to io.ReadFull
-func (b *Broker) readFull(buf []byte) (n int, err error) {
-	if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
-		return 0, err
-	}
-
-	return io.ReadFull(b.conn, buf)
-}
-
-// write ensures the conn WriteDeadline has been set up before making a
-// call to conn.Write
-func (b *Broker) write(buf []byte) (n int, err error) {
-	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
-		return 0, err
-	}
-
-	return b.conn.Write(buf)
-}
-
-func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	if b.conn == nil {
-		if b.connErr != nil {
-			return nil, b.connErr
-		}
-		return nil, ErrNotConnected
-	}
-
-	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
-		return nil, ErrUnsupportedVersion
-	}
-
-	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return nil, err
-	}
-
-	requestTime := time.Now()
-	// Will be decremented in responseReceiver (except error or request with NoResponse)
-	b.addRequestInFlightMetrics(1)
-	bytes, err := b.write(buf)
-	b.updateOutgoingCommunicationMetrics(bytes)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		return nil, err
-	}
-	b.correlationID++
-
-	if !promiseResponse {
-		// Record request latency without the response
-		b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime))
-		return nil, nil
-	}
-
-	promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)}
-	b.responses <- promise
-
-	return &promise, nil
-}
-
-func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error {
-	responseHeaderVersion := int16(-1)
-	if res != nil {
-		responseHeaderVersion = res.headerVersion()
-	}
-
-	promise, err := b.send(req, res != nil, responseHeaderVersion)
-	if err != nil {
-		return err
-	}
-
-	if promise == nil {
-		return nil
-	}
-
-	select {
-	case buf := <-promise.packets:
-		return versionedDecode(buf, res, req.version())
-	case err = <-promise.errors:
-		return err
-	}
-}
-
-func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
-	b.id, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	host, err := pd.getString()
-	if err != nil {
-		return err
-	}
-
-	port, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	if version >= 1 {
-		b.rack, err = pd.getNullableString()
-		if err != nil {
-			return err
-		}
-	}
-
-	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
-	if _, _, err := net.SplitHostPort(b.addr); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
-	host, portstr, err := net.SplitHostPort(b.addr)
-	if err != nil {
-		return err
-	}
-
-	port, err := strconv.ParseInt(portstr, 10, 32)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt32(b.id)
-
-	err = pe.putString(host)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt32(int32(port))
-
-	if version >= 1 {
-		err = pe.putNullableString(b.rack)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (b *Broker) responseReceiver() {
-	var dead error
-
-	for response := range b.responses {
-		if dead != nil {
-			// This was previously incremented in send() and
-			// we are not calling updateIncomingCommunicationMetrics()
-			b.addRequestInFlightMetrics(-1)
-			response.errors <- dead
-			continue
-		}
-
-		headerLength := getHeaderLength(response.headerVersion)
-		header := make([]byte, headerLength)
-
-		bytesReadHeader, err := b.readFull(header)
-		requestLatency := time.Since(response.requestTime)
-		if err != nil {
-			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
-			dead = err
-			response.errors <- err
-			continue
-		}
-
-		decodedHeader := responseHeader{}
-		err = versionedDecode(header, &decodedHeader, response.headerVersion)
-		if err != nil {
-			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
-			dead = err
-			response.errors <- err
-			continue
-		}
-		if decodedHeader.correlationID != response.correlationID {
-			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
-			// TODO if decoded ID < cur ID, discard until we catch up
-			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
-			dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
-			response.errors <- dead
-			continue
-		}
-
-		buf := make([]byte, decodedHeader.length-int32(headerLength)+4)
-		bytesReadBody, err := b.readFull(buf)
-		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
-		if err != nil {
-			dead = err
-			response.errors <- err
-			continue
-		}
-
-		response.packets <- buf
-	}
-	close(b.done)
-}
-
-func getHeaderLength(headerVersion int16) int8 {
-	if headerVersion < 1 {
-		return 8
-	} else {
-		// header contains additional tagged field length (0); we don't support actual tags yet.
-		return 9
-	}
-}
-
-func (b *Broker) authenticateViaSASL() error {
-	switch b.conf.Net.SASL.Mechanism {
-	case SASLTypeOAuth:
-		return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider)
-	case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
-		return b.sendAndReceiveSASLSCRAMv1()
-	case SASLTypeGSSAPI:
-		return b.sendAndReceiveKerberos()
-	default:
-		return b.sendAndReceiveSASLPlainAuth()
-	}
-}
-
-func (b *Broker) sendAndReceiveKerberos() error {
-	b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
-	if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
-		b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
-	}
-	return b.kerberosAuthenticator.Authorize(b)
-}
-
-func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
-	rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
-
-	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return err
-	}
-
-	requestTime := time.Now()
-	// Will be decremented in updateIncomingCommunicationMetrics (except error)
-	b.addRequestInFlightMetrics(1)
-	bytes, err := b.write(buf)
-	b.updateOutgoingCommunicationMetrics(bytes)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
-		return err
-	}
-	b.correlationID++
-
-	header := make([]byte, 8) // response header
-	_, err = b.readFull(header)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
-		return err
-	}
-
-	length := binary.BigEndian.Uint32(header[:4])
-	payload := make([]byte, length-4)
-	n, err := b.readFull(payload)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
-		return err
-	}
-
-	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
-	res := &SaslHandshakeResponse{}
-
-	err = versionedDecode(payload, res, 0)
-	if err != nil {
-		Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
-		return err
-	}
-
-	if res.Err != ErrNoError {
-		Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
-		return res.Err
-	}
-
-	Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
-	return nil
-}
-
-// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
-// Kafka 1.x.x onward added a SaslAuthenticate request/response message which
-// wraps the SASL flow in the Kafka protocol, which allows for returning
-// meaningful errors on authentication failure.
-//
-// In SASL Plain, Kafka expects the auth header to be in the following format
-// Message format (from https://tools.ietf.org/html/rfc4616):
-//
-//   message   = [authzid] UTF8NUL authcid UTF8NUL passwd
-//   authcid   = 1*SAFE ; MUST accept up to 255 octets
-//   authzid   = 1*SAFE ; MUST accept up to 255 octets
-//   passwd    = 1*SAFE ; MUST accept up to 255 octets
-//   UTF8NUL   = %x00 ; UTF-8 encoded NUL character
-//
-//   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
-//                  ;; any UTF-8 encoded Unicode character except NUL
-//
-// With SASL v0 handshake and auth then:
-// When credentials are valid, Kafka returns a 4 byte array of null characters.
-// When credentials are invalid, Kafka closes the connection.
-//
-// With SASL v1 handshake and auth then:
-// When credentials are invalid, Kafka replies with a SaslAuthenticate response
-// containing an error code and message detailing the authentication failure.
-func (b *Broker) sendAndReceiveSASLPlainAuth() error {
-	// default to V0 to allow for backward compatibility when SASL is enabled
-	// but not the handshake
-	if b.conf.Net.SASL.Handshake {
-		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
-		if handshakeErr != nil {
-			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
-			return handshakeErr
-		}
-	}
-
-	if b.conf.Net.SASL.Version == SASLHandshakeV1 {
-		return b.sendAndReceiveV1SASLPlainAuth()
-	}
-	return b.sendAndReceiveV0SASLPlainAuth()
-}
-
-// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol
-func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
-	length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
-	authBytes := make([]byte, length+4) // 4 byte length header + auth data
-	binary.BigEndian.PutUint32(authBytes, uint32(length))
-	copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)
-
-	requestTime := time.Now()
-	// Will be decremented in updateIncomingCommunicationMetrics (except error)
-	b.addRequestInFlightMetrics(1)
-	bytesWritten, err := b.write(authBytes)
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	header := make([]byte, 4)
-	n, err := b.readFull(header)
-	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
-	// If the credentials are valid, we would get a 4 byte response filled with null characters.
-	// Otherwise, the broker closes the connection and we get an EOF
-	if err != nil {
-		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
-	return nil
-}
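A worked example of the RFC 4616 layout used above, with placeholder credentials: an empty authzid, authcid "alice" and passwd "secret" produce a 13-byte message, so the V0 flow writes a 4-byte big-endian length of 13 followed by:

auth := "\x00" + "alice" + "\x00" + "secret" // NUL a l i c e NUL s e c r e t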
-
-// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol
-func (b *Broker) sendAndReceiveV1SASLPlainAuth() error {
-	correlationID := b.correlationID
-
-	requestTime := time.Now()
-
-	// Will be decremented in updateIncomingCommunicationMetrics (except error)
-	b.addRequestInFlightMetrics(1)
-	bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
-
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	b.correlationID++
-
-	bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID)
-	b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime))
-
-	// With v1 sasl we get an error message set in the response we can return
-	if err != nil {
-		Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	return nil
-}
-
-// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
-// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
-func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
-	if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil {
-		return err
-	}
-
-	token, err := provider.Token()
-	if err != nil {
-		return err
-	}
-
-	message, err := buildClientFirstMessage(token)
-	if err != nil {
-		return err
-	}
-
-	challenged, err := b.sendClientMessage(message)
-	if err != nil {
-		return err
-	}
-
-	if challenged {
-		// Abort the token exchange. The broker returns the failure code.
-		_, err = b.sendClientMessage([]byte(`\x01`))
-	}
-
-	return err
-}
-
-// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true
-// if the broker responds with a challenge, in which case the token is
-// rejected.
-func (b *Broker) sendClientMessage(message []byte) (bool, error) {
-	requestTime := time.Now()
-	// Will be decremented in updateIncomingCommunicationMetrics (except error)
-	b.addRequestInFlightMetrics(1)
-	correlationID := b.correlationID
-
-	bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID)
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		return false, err
-	}
-
-	b.correlationID++
-
-	res := &SaslAuthenticateResponse{}
-	bytesRead, err := b.receiveSASLServerResponse(res, correlationID)
-
-	requestLatency := time.Since(requestTime)
-	b.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
-
-	isChallenge := len(res.SaslAuthBytes) > 0
-
-	if isChallenge && err != nil {
-		Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes)
-	}
-
-	return isChallenge, err
-}
-
-func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
-	if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil {
-		return err
-	}
-
-	scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
-	if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
-		return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error())
-	}
-
-	msg, err := scramClient.Step("")
-	if err != nil {
-		return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
-	}
-
-	for !scramClient.Done() {
-		requestTime := time.Now()
-		// Will be decremented in updateIncomingCommunicationMetrics (except error)
-		b.addRequestInFlightMetrics(1)
-		correlationID := b.correlationID
-		bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
-		b.updateOutgoingCommunicationMetrics(bytesWritten)
-		if err != nil {
-			b.addRequestInFlightMetrics(-1)
-			Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
-			return err
-		}
-
-		b.correlationID++
-		challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
-		if err != nil {
-			b.addRequestInFlightMetrics(-1)
-			Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
-			return err
-		}
-
-		b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime))
-		msg, err = scramClient.Step(string(challenge))
-		if err != nil {
-			Logger.Println("SASL authentication failed", err)
-			return err
-		}
-	}
-
-	Logger.Println("SASL authentication succeeded")
-	return nil
-}
-
-func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) {
-	rb := &SaslAuthenticateRequest{msg}
-	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return 0, err
-	}
-
-	return b.write(buf)
-}
-
-func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
-	buf := make([]byte, responseLengthSize+correlationIDSize)
-	_, err := b.readFull(buf)
-	if err != nil {
-		return nil, err
-	}
-
-	header := responseHeader{}
-	err = versionedDecode(buf, &header, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	if header.correlationID != correlationID {
-		return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
-	}
-
-	buf = make([]byte, header.length-correlationIDSize)
-	_, err = b.readFull(buf)
-	if err != nil {
-		return nil, err
-	}
-
-	res := &SaslAuthenticateResponse{}
-	if err := versionedDecode(buf, res, 0); err != nil {
-		return nil, err
-	}
-	if res.Err != ErrNoError {
-		return nil, res.Err
-	}
-	return res.SaslAuthBytes, nil
-}
-
-// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
-// https://tools.ietf.org/html/rfc7628
-func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
-	var ext string
-
-	if len(token.Extensions) > 0 {
-		if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
-			return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
-		}
-		ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
-	}
-
-	resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext))
-
-	return resp, nil
-}
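A worked example of the output, assuming this sits alongside the function above; the token value and extension are hypothetical:

tok := &AccessToken{Token: "abc123", Extensions: map[string]string{"traceId": "42"}}
msg, _ := buildClientFirstMessage(tok)
// msg == []byte("n,,\x01auth=Bearer abc123\x01traceId=42\x01\x01")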
-
-// mapToString returns a list of key-value pairs ordered by key.
-// keyValSep separates the key from the value. elemSep separates each pair.
-func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
-	buf := make([]string, 0, len(extensions))
-
-	for k, v := range extensions {
-		buf = append(buf, k+keyValSep+v)
-	}
-
-	sort.Strings(buf)
-
-	return strings.Join(buf, elemSep)
-}
-
-func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
-	authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
-	rb := &SaslAuthenticateRequest{authBytes}
-	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return 0, err
-	}
-
-	return b.write(buf)
-}
-
-func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
-	rb := &SaslAuthenticateRequest{initialResp}
-
-	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
-
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return 0, err
-	}
-
-	return b.write(buf)
-}
-
-func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) {
-	buf := make([]byte, responseLengthSize+correlationIDSize)
-	bytesRead, err := b.readFull(buf)
-	if err != nil {
-		return bytesRead, err
-	}
-
-	header := responseHeader{}
-	err = versionedDecode(buf, &header, 0)
-	if err != nil {
-		return bytesRead, err
-	}
-
-	if header.correlationID != correlationID {
-		return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
-	}
-
-	buf = make([]byte, header.length-correlationIDSize)
-	c, err := b.readFull(buf)
-	bytesRead += c
-	if err != nil {
-		return bytesRead, err
-	}
-
-	if err := versionedDecode(buf, res, 0); err != nil {
-		return bytesRead, err
-	}
-
-	if res.Err != ErrNoError {
-		return bytesRead, res.Err
-	}
-
-	return bytesRead, nil
-}
-
-func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
-	b.updateRequestLatencyAndInFlightMetrics(requestLatency)
-	b.responseRate.Mark(1)
-
-	if b.brokerResponseRate != nil {
-		b.brokerResponseRate.Mark(1)
-	}
-
-	responseSize := int64(bytes)
-	b.incomingByteRate.Mark(responseSize)
-	if b.brokerIncomingByteRate != nil {
-		b.brokerIncomingByteRate.Mark(responseSize)
-	}
-
-	b.responseSize.Update(responseSize)
-	if b.brokerResponseSize != nil {
-		b.brokerResponseSize.Update(responseSize)
-	}
-}
-
-func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) {
-	requestLatencyInMs := int64(requestLatency / time.Millisecond)
-	b.requestLatency.Update(requestLatencyInMs)
-
-	if b.brokerRequestLatency != nil {
-		b.brokerRequestLatency.Update(requestLatencyInMs)
-	}
-
-	b.addRequestInFlightMetrics(-1)
-}
-
-func (b *Broker) addRequestInFlightMetrics(i int64) {
-	b.requestsInFlight.Inc(i)
-	if b.brokerRequestsInFlight != nil {
-		b.brokerRequestsInFlight.Inc(i)
-	}
-}
-
-func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
-	b.requestRate.Mark(1)
-	if b.brokerRequestRate != nil {
-		b.brokerRequestRate.Mark(1)
-	}
-
-	requestSize := int64(bytes)
-	b.outgoingByteRate.Mark(requestSize)
-	if b.brokerOutgoingByteRate != nil {
-		b.brokerOutgoingByteRate.Mark(requestSize)
-	}
-
-	b.requestSize.Update(requestSize)
-	if b.brokerRequestSize != nil {
-		b.brokerRequestSize.Update(requestSize)
-	}
-}
-
-func (b *Broker) registerMetrics() {
-	b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
-	b.brokerRequestRate = b.registerMeter("request-rate")
-	b.brokerRequestSize = b.registerHistogram("request-size")
-	b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
-	b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
-	b.brokerResponseRate = b.registerMeter("response-rate")
-	b.brokerResponseSize = b.registerHistogram("response-size")
-	b.brokerRequestsInFlight = b.registerCounter("requests-in-flight")
-}
-
-func (b *Broker) unregisterMetrics() {
-	for _, name := range b.registeredMetrics {
-		b.conf.MetricRegistry.Unregister(name)
-	}
-	b.registeredMetrics = nil
-}
-
-func (b *Broker) registerMeter(name string) metrics.Meter {
-	nameForBroker := getMetricNameForBroker(name, b)
-	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
-	return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry)
-}
-
-func (b *Broker) registerHistogram(name string) metrics.Histogram {
-	nameForBroker := getMetricNameForBroker(name, b)
-	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
-	return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
-}
-
-func (b *Broker) registerCounter(name string) metrics.Counter {
-	nameForBroker := getMetricNameForBroker(name, b)
-	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
-	return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry)
-}
-
-func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		cfg = &tls.Config{
-			MinVersion: tls.VersionTLS12,
-		}
-	}
-	if cfg.ServerName != "" {
-		return cfg
-	}
-
-	c := cfg.Clone()
-	sn, _, err := net.SplitHostPort(addr)
-	if err != nil {
-		Logger.Println(fmt.Errorf("failed to get ServerName from addr: %w", err))
-	}
-	c.ServerName = sn
-	return c
-}
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
deleted file mode 100644
index c0918ba..0000000
--- a/vendor/github.com/Shopify/sarama/client.go
+++ /dev/null
@@ -1,1112 +0,0 @@
-package sarama
-
-import (
-	"math/rand"
-	"sort"
-	"sync"
-	"time"
-)
-
-// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
-// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
-// automatically when it passes out of scope. It is safe to share a client amongst many
-// users; however, Kafka will process requests from a single client strictly serially,
-// so it is generally more efficient to use the default of one client per producer/consumer.
-type Client interface {
-	// Config returns the Config struct of the client. This struct should not be
-	// altered after it has been created.
-	Config() *Config
-
-	// Controller returns the cluster controller broker. It will return a
-	// locally cached value if it's available. You can call RefreshController
-	// to update the cached value. Requires Kafka 0.10 or higher.
-	Controller() (*Broker, error)
-
-	// RefreshController retrieves the cluster controller from fresh metadata
-	// and stores it in the local cache. Requires Kafka 0.10 or higher.
-	RefreshController() (*Broker, error)
-
-	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
-	Brokers() []*Broker
-
-	// Broker returns the active Broker if available for the broker ID.
-	Broker(brokerID int32) (*Broker, error)
-
-	// Topics returns the set of available topics as retrieved from cluster metadata.
-	Topics() ([]string, error)
-
-	// Partitions returns the sorted list of all partition IDs for the given topic.
-	Partitions(topic string) ([]int32, error)
-
-	// WritablePartitions returns the sorted list of all writable partition IDs for
-	// the given topic, where "writable" means "having a valid leader accepting
-	// writes".
-	WritablePartitions(topic string) ([]int32, error)
-
-	// Leader returns the broker object that is the leader of the current
-	// topic/partition, as determined by querying the cluster metadata.
-	Leader(topic string, partitionID int32) (*Broker, error)
-
-	// Replicas returns the set of all replica IDs for the given partition.
-	Replicas(topic string, partitionID int32) ([]int32, error)
-
-	// InSyncReplicas returns the set of all in-sync replica IDs for the given
-	// partition. In-sync replicas are replicas which are fully caught up with
-	// the partition leader.
-	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
-
-	// OfflineReplicas returns the set of all offline replica IDs for the given
-	// partition. Offline replicas are replicas which are offline
-	OfflineReplicas(topic string, partitionID int32) ([]int32, error)
-
-	// RefreshBrokers takes a list of addresses to be used as seed brokers.
-	// Existing broker connections are closed and the updated list of seed brokers
-	// will be used for the next metadata fetch.
-	RefreshBrokers(addrs []string) error
-
-	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
-	// available metadata for those topics. If no topics are provided, it will refresh
-	// metadata for all topics.
-	RefreshMetadata(topics ...string) error
-
-	// GetOffset queries the cluster to get the most recent available offset at the
-	// given time (in milliseconds) on the topic/partition combination.
-	// Time should be OffsetOldest for the earliest available offset,
-	// OffsetNewest for the offset of the message that will be produced next, or a time.
-	GetOffset(topic string, partitionID int32, time int64) (int64, error)
-
-	// Coordinator returns the coordinating broker for a consumer group. It will
-	// return a locally cached value if it's available. You can call
-	// RefreshCoordinator to update the cached value. This function only works on
-	// Kafka 0.8.2 and higher.
-	Coordinator(consumerGroup string) (*Broker, error)
-
-	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
-	// in local cache. This function only works on Kafka 0.8.2 and higher.
-	RefreshCoordinator(consumerGroup string) error
-
-	// InitProducerID retrieves information required for Idempotent Producer
-	InitProducerID() (*InitProducerIDResponse, error)
-
-	// Close shuts down all broker connections managed by this client. It is required
-	// to call this function before a client object passes out of scope, as it will
-	// otherwise leak memory. You must close any Producers or Consumers using a client
-	// before you close the client.
-	Close() error
-
-	// Closed returns true if the client has already had Close called on it
-	Closed() bool
-}
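A minimal sketch of creating and tearing down a Client, assuming the log package and a hypothetical local broker address; Close must always be called, per the interface comment above.

client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
if err != nil {
	log.Fatal(err)
}
defer func() {
	if err := client.Close(); err != nil {
		log.Println("failed to close client:", err)
	}
}()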
-
-const (
-	// OffsetNewest stands for the log head offset, i.e. the offset that will be
-	// assigned to the next message that will be produced to the partition. You
-	// can send this to a client's GetOffset method to get this offset, or when
-	// calling ConsumePartition to start consuming new messages.
-	OffsetNewest int64 = -1
-	// OffsetOldest stands for the oldest offset available on the broker for a
-	// partition. You can send this to a client's GetOffset method to get this
-	// offset, or when calling ConsumePartition to start consuming from the
-	// oldest offset that is still available on the broker.
-	OffsetOldest int64 = -2
-)
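A short usage sketch of the two sentinel offsets with the client's GetOffset method, assuming an existing client, a hypothetical topic name, and partition 0:

oldest, err := client.GetOffset("my-topic", 0, sarama.OffsetOldest) // earliest offset still retained
if err != nil {
	log.Fatal(err)
}
newest, err := client.GetOffset("my-topic", 0, sarama.OffsetNewest) // offset the next produced message will get
if err != nil {
	log.Fatal(err)
}
log.Printf("partition 0 spans offsets [%d, %d)", oldest, newest)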
-
-type client struct {
-	conf           *Config
-	closer, closed chan none // for shutting down background metadata updater
-
-	// the broker addresses given to us through the constructor are not guaranteed to be returned in
-	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
-	// so we store them separately
-	seedBrokers []*Broker
-	deadSeeds   []*Broker
-
-	controllerID   int32                                   // cluster controller broker id
-	brokers        map[int32]*Broker                       // maps broker ids to brokers
-	metadata       map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
-	metadataTopics map[string]none                         // topics that need to collect metadata
-	coordinators   map[string]int32                        // Maps consumer group names to coordinating broker IDs
-
-	// If the number of partitions is large, we can get some churn calling cachedPartitions,
-	// so the result is cached.  It is important to update this value whenever metadata is changed
-	cachedPartitionsResults map[string][maxPartitionIndex][]int32
-
-	lock sync.RWMutex // protects access to the maps that hold cluster state.
-}
-
-// NewClient creates a new Client. It connects to one of the given broker addresses
-// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
-// be retrieved from any of the given broker addresses, the client is not created.
-func NewClient(addrs []string, conf *Config) (Client, error) {
-	Logger.Println("Initializing new client")
-
-	if conf == nil {
-		conf = NewConfig()
-	}
-
-	if err := conf.Validate(); err != nil {
-		return nil, err
-	}
-
-	if len(addrs) < 1 {
-		return nil, ConfigurationError("You must provide at least one broker address")
-	}
-
-	client := &client{
-		conf:                    conf,
-		closer:                  make(chan none),
-		closed:                  make(chan none),
-		brokers:                 make(map[int32]*Broker),
-		metadata:                make(map[string]map[int32]*PartitionMetadata),
-		metadataTopics:          make(map[string]none),
-		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
-		coordinators:            make(map[string]int32),
-	}
-
-	client.randomizeSeedBrokers(addrs)
-
-	if conf.Metadata.Full {
-		// do an initial fetch of all cluster metadata by specifying an empty list of topics
-		err := client.RefreshMetadata()
-		switch err {
-		case nil:
-			break
-		case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
-			// indicates that maybe part of the cluster is down, but is not fatal to creating the client
-			Logger.Println(err)
-		default:
-			close(client.closed) // we haven't started the background updater yet, so we have to do this manually
-			_ = client.Close()
-			return nil, err
-		}
-	}
-	go withRecover(client.backgroundMetadataUpdater)
-
-	Logger.Println("Successfully initialized new client")
-
-	return client, nil
-}
-
-func (client *client) Config() *Config {
-	return client.conf
-}
-
-func (client *client) Brokers() []*Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-	brokers := make([]*Broker, 0, len(client.brokers))
-	for _, broker := range client.brokers {
-		brokers = append(brokers, broker)
-	}
-	return brokers
-}
-
-func (client *client) Broker(brokerID int32) (*Broker, error) {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-	broker, ok := client.brokers[brokerID]
-	if !ok {
-		return nil, ErrBrokerNotFound
-	}
-	_ = broker.Open(client.conf)
-	return broker, nil
-}
-
-func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
-	var err error
-	for broker := client.any(); broker != nil; broker = client.any() {
-		req := &InitProducerIDRequest{}
-
-		response, err := broker.InitProducerID(req)
-		switch err.(type) {
-		case nil:
-			return response, nil
-		default:
-			// some error, remove that broker and try again
-			Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
-			_ = broker.Close()
-			client.deregisterBroker(broker)
-		}
-	}
-	return nil, err
-}
-
-func (client *client) Close() error {
-	if client.Closed() {
-		// Chances are this is being called from a defer() and the error will go unobserved
-		// so we go ahead and log the event in this case.
-		Logger.Printf("Close() called on already closed client")
-		return ErrClosedClient
-	}
-
-	// shutdown and wait for the background thread before we take the lock, to avoid races
-	close(client.closer)
-	<-client.closed
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	Logger.Println("Closing Client")
-
-	for _, broker := range client.brokers {
-		safeAsyncClose(broker)
-	}
-
-	for _, broker := range client.seedBrokers {
-		safeAsyncClose(broker)
-	}
-
-	client.brokers = nil
-	client.metadata = nil
-	client.metadataTopics = nil
-
-	return nil
-}
-
-func (client *client) Closed() bool {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	return client.brokers == nil
-}
-
-func (client *client) Topics() ([]string, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	ret := make([]string, 0, len(client.metadata))
-	for topic := range client.metadata {
-		ret = append(ret, topic)
-	}
-
-	return ret, nil
-}
-
-func (client *client) MetadataTopics() ([]string, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	ret := make([]string, 0, len(client.metadataTopics))
-	for topic := range client.metadataTopics {
-		ret = append(ret, topic)
-	}
-
-	return ret, nil
-}
-
-func (client *client) Partitions(topic string) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	partitions := client.cachedPartitions(topic, allPartitions)
-
-	if len(partitions) == 0 {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		partitions = client.cachedPartitions(topic, allPartitions)
-	}
-
-	// no partitions found after refreshing metadata
-	if len(partitions) == 0 {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	return partitions, nil
-}
-
-func (client *client) WritablePartitions(topic string) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	partitions := client.cachedPartitions(topic, writablePartitions)
-
-	// len==0 catches when it's nil (no such topic) and the odd case when every single
-	// partition is undergoing leader election simultaneously. Callers have to be able to handle
-	// this function returning an empty slice (which is a valid return value) but catching it
-	// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
-	// a metadata refresh as a nicety so callers can just try again and don't have to manually
-	// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
-	if len(partitions) == 0 {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		partitions = client.cachedPartitions(topic, writablePartitions)
-	}
-
-	if partitions == nil {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	return partitions, nil
-}
-
-func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	metadata := client.cachedMetadata(topic, partitionID)
-
-	if metadata == nil {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		metadata = client.cachedMetadata(topic, partitionID)
-	}
-
-	if metadata == nil {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	if metadata.Err == ErrReplicaNotAvailable {
-		return dupInt32Slice(metadata.Replicas), metadata.Err
-	}
-	return dupInt32Slice(metadata.Replicas), nil
-}
-
-func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	metadata := client.cachedMetadata(topic, partitionID)
-
-	if metadata == nil {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		metadata = client.cachedMetadata(topic, partitionID)
-	}
-
-	if metadata == nil {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	if metadata.Err == ErrReplicaNotAvailable {
-		return dupInt32Slice(metadata.Isr), metadata.Err
-	}
-	return dupInt32Slice(metadata.Isr), nil
-}
-
-func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	metadata := client.cachedMetadata(topic, partitionID)
-
-	if metadata == nil {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		metadata = client.cachedMetadata(topic, partitionID)
-	}
-
-	if metadata == nil {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	if metadata.Err == ErrReplicaNotAvailable {
-		return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
-	}
-	return dupInt32Slice(metadata.OfflineReplicas), nil
-}
-
-func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	leader, err := client.cachedLeader(topic, partitionID)
-
-	if leader == nil {
-		err = client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		leader, err = client.cachedLeader(topic, partitionID)
-	}
-
-	return leader, err
-}
-
-func (client *client) RefreshBrokers(addrs []string) error {
-	if client.Closed() {
-		return ErrClosedClient
-	}
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	for _, broker := range client.brokers {
-		_ = broker.Close()
-		delete(client.brokers, broker.ID())
-	}
-
-	client.seedBrokers = nil
-	client.deadSeeds = nil
-
-	client.randomizeSeedBrokers(addrs)
-
-	return nil
-}
-
-func (client *client) RefreshMetadata(topics ...string) error {
-	if client.Closed() {
-		return ErrClosedClient
-	}
-
-	// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
-	// error. This handles the case by returning an error instead of sending it
-	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
-	for _, topic := range topics {
-		if topic == "" {
-			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
-		}
-	}
-
-	deadline := time.Time{}
-	if client.conf.Metadata.Timeout > 0 {
-		deadline = time.Now().Add(client.conf.Metadata.Timeout)
-	}
-	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
-}
-
-func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
-	if client.Closed() {
-		return -1, ErrClosedClient
-	}
-
-	offset, err := client.getOffset(topic, partitionID, time)
-	if err != nil {
-		if err := client.RefreshMetadata(topic); err != nil {
-			return -1, err
-		}
-		return client.getOffset(topic, partitionID, time)
-	}
-
-	return offset, err
-}
-
-func (client *client) Controller() (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	if !client.conf.Version.IsAtLeast(V0_10_0_0) {
-		return nil, ErrUnsupportedVersion
-	}
-
-	controller := client.cachedController()
-	if controller == nil {
-		if err := client.refreshMetadata(); err != nil {
-			return nil, err
-		}
-		controller = client.cachedController()
-	}
-
-	if controller == nil {
-		return nil, ErrControllerNotAvailable
-	}
-
-	_ = controller.Open(client.conf)
-	return controller, nil
-}
-
-// deregisterController removes the cached controllerID
-func (client *client) deregisterController() {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	delete(client.brokers, client.controllerID)
-}
-
-// RefreshController retrieves the cluster controller from fresh metadata
-// and stores it in the local cache. Requires Kafka 0.10 or higher.
-func (client *client) RefreshController() (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	client.deregisterController()
-
-	if err := client.refreshMetadata(); err != nil {
-		return nil, err
-	}
-
-	controller := client.cachedController()
-	if controller == nil {
-		return nil, ErrControllerNotAvailable
-	}
-
-	_ = controller.Open(client.conf)
-	return controller, nil
-}
-
-func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	coordinator := client.cachedCoordinator(consumerGroup)
-
-	if coordinator == nil {
-		if err := client.RefreshCoordinator(consumerGroup); err != nil {
-			return nil, err
-		}
-		coordinator = client.cachedCoordinator(consumerGroup)
-	}
-
-	if coordinator == nil {
-		return nil, ErrConsumerCoordinatorNotAvailable
-	}
-
-	_ = coordinator.Open(client.conf)
-	return coordinator, nil
-}
-
-func (client *client) RefreshCoordinator(consumerGroup string) error {
-	if client.Closed() {
-		return ErrClosedClient
-	}
-
-	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
-	if err != nil {
-		return err
-	}
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	client.registerBroker(response.Coordinator)
-	client.coordinators[consumerGroup] = response.Coordinator.ID()
-	return nil
-}
-
-// private broker management helpers
-
-func (client *client) randomizeSeedBrokers(addrs []string) {
-	random := rand.New(rand.NewSource(time.Now().UnixNano()))
-	for _, index := range random.Perm(len(addrs)) {
-		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
-	}
-}
-
-func (client *client) updateBroker(brokers []*Broker) {
-	currentBroker := make(map[int32]*Broker, len(brokers))
-
-	for _, broker := range brokers {
-		currentBroker[broker.ID()] = broker
-		if client.brokers[broker.ID()] == nil { // add new broker
-			client.brokers[broker.ID()] = broker
-			Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
-		} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
-			safeAsyncClose(client.brokers[broker.ID()])
-			client.brokers[broker.ID()] = broker
-			Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
-		}
-	}
-
-	for id, broker := range client.brokers {
-		if _, exist := currentBroker[id]; !exist { // remove old broker
-			safeAsyncClose(broker)
-			delete(client.brokers, id)
-			Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
-		}
-	}
-}
-
-// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
-// in the brokers map, either as a new entry or by replacing a stale entry whose address has changed.
-// You must hold the write lock before calling this function.
-func (client *client) registerBroker(broker *Broker) {
-	if client.brokers == nil {
-		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
-		return
-	}
-
-	if client.brokers[broker.ID()] == nil {
-		client.brokers[broker.ID()] = broker
-		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
-	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
-		safeAsyncClose(client.brokers[broker.ID()])
-		client.brokers[broker.ID()] = broker
-		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
-	}
-}
-
-// deregisterBroker moves the broker to the dead-seeds list if it is the current seed
-// broker; otherwise it removes the broker from the brokers map completely.
-func (client *client) deregisterBroker(broker *Broker) {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
-		client.deadSeeds = append(client.deadSeeds, broker)
-		client.seedBrokers = client.seedBrokers[1:]
-	} else {
-		// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
-		// but we really shouldn't have to; once that loop is made better this case can be
-		// removed, and the function generally can be renamed from `deregisterBroker` to
-		// `nextSeedBroker` or something
-		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
-		delete(client.brokers, broker.ID())
-	}
-}
-
-func (client *client) resurrectDeadBrokers() {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
-	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
-	client.deadSeeds = nil
-}
-
-func (client *client) any() *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	if len(client.seedBrokers) > 0 {
-		_ = client.seedBrokers[0].Open(client.conf)
-		return client.seedBrokers[0]
-	}
-
-	// not guaranteed to be random *or* deterministic
-	for _, broker := range client.brokers {
-		_ = broker.Open(client.conf)
-		return broker
-	}
-
-	return nil
-}
-
-// private caching/lazy metadata helpers
-
-type partitionType int
-
-const (
-	allPartitions partitionType = iota
-	writablePartitions
-	// If you add any more types, update the partition cache in updateMetadata()
-
-	// Ensure this is the last partition type value
-	maxPartitionIndex
-)
-
-func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	partitions := client.metadata[topic]
-	if partitions != nil {
-		return partitions[partitionID]
-	}
-
-	return nil
-}
-
-func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	partitions, exists := client.cachedPartitionsResults[topic]
-
-	if !exists {
-		return nil
-	}
-	return partitions[partitionSet]
-}
-
-func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
-	partitions := client.metadata[topic]
-
-	if partitions == nil {
-		return nil
-	}
-
-	ret := make([]int32, 0, len(partitions))
-	for _, partition := range partitions {
-		if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
-			continue
-		}
-		ret = append(ret, partition.ID)
-	}
-
-	sort.Sort(int32Slice(ret))
-	return ret
-}
-
-func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	partitions := client.metadata[topic]
-	if partitions != nil {
-		metadata, ok := partitions[partitionID]
-		if ok {
-			if metadata.Err == ErrLeaderNotAvailable {
-				return nil, ErrLeaderNotAvailable
-			}
-			b := client.brokers[metadata.Leader]
-			if b == nil {
-				return nil, ErrLeaderNotAvailable
-			}
-			_ = b.Open(client.conf)
-			return b, nil
-		}
-	}
-
-	return nil, ErrUnknownTopicOrPartition
-}
-
-func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
-	broker, err := client.Leader(topic, partitionID)
-	if err != nil {
-		return -1, err
-	}
-
-	request := &OffsetRequest{}
-	if client.conf.Version.IsAtLeast(V0_10_1_0) {
-		request.Version = 1
-	}
-	request.AddBlock(topic, partitionID, time, 1)
-
-	response, err := broker.GetAvailableOffsets(request)
-	if err != nil {
-		_ = broker.Close()
-		return -1, err
-	}
-
-	block := response.GetBlock(topic, partitionID)
-	if block == nil {
-		_ = broker.Close()
-		return -1, ErrIncompleteResponse
-	}
-	if block.Err != ErrNoError {
-		return -1, block.Err
-	}
-	if len(block.Offsets) != 1 {
-		return -1, ErrOffsetOutOfRange
-	}
-
-	return block.Offsets[0], nil
-}
-
-// core metadata update logic
-
-func (client *client) backgroundMetadataUpdater() {
-	defer close(client.closed)
-
-	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
-		return
-	}
-
-	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			if err := client.refreshMetadata(); err != nil {
-				Logger.Println("Client background metadata update:", err)
-			}
-		case <-client.closer:
-			return
-		}
-	}
-}
-
-func (client *client) refreshMetadata() error {
-	var topics []string
-
-	if !client.conf.Metadata.Full {
-		if specificTopics, err := client.MetadataTopics(); err != nil {
-			return err
-		} else if len(specificTopics) == 0 {
-			return ErrNoTopicsToUpdateMetadata
-		} else {
-			topics = specificTopics
-		}
-	}
-
-	if err := client.RefreshMetadata(topics...); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
-	pastDeadline := func(backoff time.Duration) bool {
-		if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
-			// we are past the deadline
-			return true
-		}
-		return false
-	}
-	retry := func(err error) error {
-		if attemptsRemaining > 0 {
-			backoff := client.computeBackoff(attemptsRemaining)
-			if pastDeadline(backoff) {
-				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
-				return err
-			}
-			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
-			if backoff > 0 {
-				time.Sleep(backoff)
-			}
-			return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
-		}
-		return err
-	}
-
-	broker := client.any()
-	for ; broker != nil && !pastDeadline(0); broker = client.any() {
-		allowAutoTopicCreation := true
-		if len(topics) > 0 {
-			Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
-		} else {
-			allowAutoTopicCreation = false
-			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
-		}
-
-		req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
-		if client.conf.Version.IsAtLeast(V1_0_0_0) {
-			req.Version = 5
-		} else if client.conf.Version.IsAtLeast(V0_10_0_0) {
-			req.Version = 1
-		}
-		response, err := broker.GetMetadata(req)
-		switch err := err.(type) {
-		case nil:
-			allKnownMetaData := len(topics) == 0
-			// valid response, use it
-			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
-			if shouldRetry {
-				Logger.Println("client/metadata found some partitions to be leaderless")
-				return retry(err) // note: err can be nil
-			}
-			return err
-
-		case PacketEncodingError:
-			// didn't even send, return the error
-			return err
-
-		case KError:
-			// if this is a SASL auth error, return it, as it _should_ be non-retryable for all brokers
-			if err == ErrSASLAuthenticationFailed {
-				Logger.Println("client/metadata failed SASL authentication")
-				return err
-			}
-
-			if err == ErrTopicAuthorizationFailed {
-				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
-				return err
-			}
-			// else remove that broker and try again
-			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
-			_ = broker.Close()
-			client.deregisterBroker(broker)
-
-		default:
-			// some other error, remove that broker and try again
-			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
-			_ = broker.Close()
-			client.deregisterBroker(broker)
-		}
-	}
-
-	if broker != nil {
-		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
-		return retry(ErrOutOfBrokers)
-	}
-
-	Logger.Println("client/metadata no available broker to send metadata request to")
-	client.resurrectDeadBrokers()
-	return retry(ErrOutOfBrokers)
-}
-
-// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
-func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
-	if client.Closed() {
-		return
-	}
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	// For all the brokers we received:
-	// - if it is a new ID, save it
-	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
-	// - if a previously known broker is absent from the response, remove the old broker
-	// - otherwise ignore it, replacing our existing one would just bounce the connection
-	client.updateBroker(data.Brokers)
-
-	client.controllerID = data.ControllerID
-
-	if allKnownMetaData {
-		client.metadata = make(map[string]map[int32]*PartitionMetadata)
-		client.metadataTopics = make(map[string]none)
-		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
-	}
-	for _, topic := range data.Topics {
-		// topics must be added to `metadataTopics` first, so that all requested
-		// topics are recorded and remain trackable for the periodic metadata
-		// refresh.
-		if _, exists := client.metadataTopics[topic.Name]; !exists {
-			client.metadataTopics[topic.Name] = none{}
-		}
-		delete(client.metadata, topic.Name)
-		delete(client.cachedPartitionsResults, topic.Name)
-
-		switch topic.Err {
-		case ErrNoError:
-			// no-op
-		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
-			err = topic.Err
-			continue
-		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
-			err = topic.Err
-			retry = true
-			continue
-		case ErrLeaderNotAvailable: // retry, but store partial partition results
-			retry = true
-		default: // don't retry, don't store partial results
-			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
-			err = topic.Err
-			continue
-		}
-
-		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
-		for _, partition := range topic.Partitions {
-			client.metadata[topic.Name][partition.ID] = partition
-			if partition.Err == ErrLeaderNotAvailable {
-				retry = true
-			}
-		}
-
-		var partitionCache [maxPartitionIndex][]int32
-		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
-		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
-		client.cachedPartitionsResults[topic.Name] = partitionCache
-	}
-
-	return
-}
-
-func (client *client) cachedCoordinator(consumerGroup string) *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
-		return client.brokers[coordinatorID]
-	}
-	return nil
-}
-
-func (client *client) cachedController() *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	return client.brokers[client.controllerID]
-}
-
-func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
-	if client.conf.Metadata.Retry.BackoffFunc != nil {
-		maxRetries := client.conf.Metadata.Retry.Max
-		retries := maxRetries - attemptsRemaining
-		return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
-	}
-	return client.conf.Metadata.Retry.Backoff
-}
-
-func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
-	retry := func(err error) (*FindCoordinatorResponse, error) {
-		if attemptsRemaining > 0 {
-			backoff := client.computeBackoff(attemptsRemaining)
-			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
-			time.Sleep(backoff)
-			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
-		}
-		return nil, err
-	}
-
-	for broker := client.any(); broker != nil; broker = client.any() {
-		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
-
-		request := new(FindCoordinatorRequest)
-		request.CoordinatorKey = consumerGroup
-		request.CoordinatorType = CoordinatorGroup
-
-		response, err := broker.FindCoordinator(request)
-		if err != nil {
-			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
-
-			switch err.(type) {
-			case PacketEncodingError:
-				return nil, err
-			default:
-				_ = broker.Close()
-				client.deregisterBroker(broker)
-				continue
-			}
-		}
-
-		switch response.Err {
-		case ErrNoError:
-			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
-			return response, nil
-
-		case ErrConsumerCoordinatorNotAvailable:
-			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
-
-			// This is very ugly, but this scenario will only happen once per cluster.
-			// The __consumer_offsets topic only has to be created one time.
-			// The number of partitions is not configurable, but partition 0 should always exist.
-			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
-				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
-				time.Sleep(2 * time.Second)
-			}
-
-			return retry(ErrConsumerCoordinatorNotAvailable)
-		case ErrGroupAuthorizationFailed:
-			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
-			return retry(ErrGroupAuthorizationFailed)
-
-		default:
-			return nil, response.Err
-		}
-	}
-
-	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
-	client.resurrectDeadBrokers()
-	return retry(ErrOutOfBrokers)
-}
-
-// nopCloserClient embeds an existing Client, but disables
-// the Close method (yet all other methods pass
-// through unchanged). This is for use in larger structs
-// where it is undesirable to close the client that was
-// passed in by the caller.
-type nopCloserClient struct {
-	Client
-}
-
-// Close intercepts and purposely does not call the underlying
-// client's Close() method.
-func (ncc *nopCloserClient) Close() error {
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
deleted file mode 100644
index 12cd7c3..0000000
--- a/vendor/github.com/Shopify/sarama/compress.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"sync"
-
-	snappy "github.com/eapache/go-xerial-snappy"
-	"github.com/pierrec/lz4"
-)
-
-var (
-	lz4WriterPool = sync.Pool{
-		New: func() interface{} {
-			return lz4.NewWriter(nil)
-		},
-	}
-
-	gzipWriterPool = sync.Pool{
-		New: func() interface{} {
-			return gzip.NewWriter(nil)
-		},
-	}
-	gzipWriterPoolForCompressionLevel1 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 1)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel2 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 2)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel3 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 3)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel4 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 4)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel5 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 5)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel6 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 6)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel7 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 7)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel8 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 8)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel9 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 9)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-)
-
-func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
-	switch cc {
-	case CompressionNone:
-		return data, nil
-	case CompressionGZIP:
-		var (
-			err    error
-			buf    bytes.Buffer
-			writer *gzip.Writer
-		)
-
-		switch level {
-		case CompressionLevelDefault:
-			writer = gzipWriterPool.Get().(*gzip.Writer)
-			defer gzipWriterPool.Put(writer)
-			writer.Reset(&buf)
-		case 1:
-			writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel1.Put(writer)
-			writer.Reset(&buf)
-		case 2:
-			writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel2.Put(writer)
-			writer.Reset(&buf)
-		case 3:
-			writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel3.Put(writer)
-			writer.Reset(&buf)
-		case 4:
-			writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel4.Put(writer)
-			writer.Reset(&buf)
-		case 5:
-			writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel5.Put(writer)
-			writer.Reset(&buf)
-		case 6:
-			writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel6.Put(writer)
-			writer.Reset(&buf)
-		case 7:
-			writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel7.Put(writer)
-			writer.Reset(&buf)
-		case 8:
-			writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel8.Put(writer)
-			writer.Reset(&buf)
-		case 9:
-			writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel9.Put(writer)
-			writer.Reset(&buf)
-		default:
-			writer, err = gzip.NewWriterLevel(&buf, level)
-			if err != nil {
-				return nil, err
-			}
-		}
-		if _, err := writer.Write(data); err != nil {
-			return nil, err
-		}
-		if err := writer.Close(); err != nil {
-			return nil, err
-		}
-		return buf.Bytes(), nil
-	case CompressionSnappy:
-		return snappy.Encode(data), nil
-	case CompressionLZ4:
-		writer := lz4WriterPool.Get().(*lz4.Writer)
-		defer lz4WriterPool.Put(writer)
-
-		var buf bytes.Buffer
-		writer.Reset(&buf)
-
-		if _, err := writer.Write(data); err != nil {
-			return nil, err
-		}
-		if err := writer.Close(); err != nil {
-			return nil, err
-		}
-		return buf.Bytes(), nil
-	case CompressionZSTD:
-		return zstdCompress(nil, data)
-	default:
-		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
deleted file mode 100644
index 43e739c..0000000
--- a/vendor/github.com/Shopify/sarama/config.go
+++ /dev/null
@@ -1,765 +0,0 @@
-package sarama
-
-import (
-	"compress/gzip"
-	"crypto/tls"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"regexp"
-	"time"
-
-	"github.com/rcrowley/go-metrics"
-	"golang.org/x/net/proxy"
-)
-
-const defaultClientID = "sarama"
-
-var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
-
-// Config is used to pass multiple configuration options to Sarama's constructors.
-type Config struct {
-	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
-	Admin struct {
-		Retry struct {
-			// The total number of times to retry sending (retriable) admin requests (default 5).
-			// Similar to the `retries` setting of the JVM AdminClientConfig.
-			Max int
-			// Backoff time between retries of a failed request (default 100ms)
-			Backoff time.Duration
-		}
-		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
-		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
-		Timeout time.Duration
-	}
-
-	// Net is the namespace for network-level properties used by the Broker, and
-	// shared by the Client/Producer/Consumer.
-	Net struct {
-		// How many outstanding requests a connection is allowed to have before
-		// sending on it blocks (default 5).
-		MaxOpenRequests int
-
-		// All three of the below configurations are similar to the
-		// `socket.timeout.ms` setting in JVM kafka. All of them default
-		// to 30 seconds.
-		DialTimeout  time.Duration // How long to wait for the initial connection.
-		ReadTimeout  time.Duration // How long to wait for a response.
-		WriteTimeout time.Duration // How long to wait for a transmit.
-
-		TLS struct {
-			// Whether or not to use TLS when connecting to the broker
-			// (defaults to false).
-			Enable bool
-			// The TLS configuration to use for secure connections if
-			// enabled (defaults to nil).
-			Config *tls.Config
-		}
-
-		// SASL-based authentication with the broker. While there are multiple SASL authentication
-		// methods, the current implementation is limited to plaintext (SASL/PLAIN) authentication.
-		SASL struct {
-			// Whether or not to use SASL authentication when connecting to the broker
-			// (defaults to false).
-			Enable bool
-			// SASLMechanism is the name of the enabled SASL mechanism.
-			// Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
-			Mechanism SASLMechanism
-			// Version is the SASL Protocol Version to use
-			// Kafka > 1.x should use V1, except on Azure EventHub which uses V0
-			Version int16
-			// Whether or not to send the Kafka SASL handshake first if enabled
-			// (defaults to true). You should only set this to false if you're using
-			// a non-Kafka SASL proxy.
-			Handshake bool
-			// AuthIdentity is an (optional) authorization identity (authzid) to
-			// use for SASL/PLAIN authentication (if different from User) when
-			// an authenticated user is permitted to act as the presented
-			// alternative user. See RFC4616 for details.
-			AuthIdentity string
-			// User is the authentication identity (authcid) to present for
-			// SASL/PLAIN or SASL/SCRAM authentication
-			User string
-			// Password for SASL/PLAIN authentication
-			Password string
-			// authz id used for SASL/SCRAM authentication
-			SCRAMAuthzID string
-			// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
-			// client used to perform the SCRAM exchange with the server.
-			SCRAMClientGeneratorFunc func() SCRAMClient
-			// TokenProvider is a user-defined callback for generating
-			// access tokens for SASL/OAUTHBEARER auth. See the
-			// AccessTokenProvider interface docs for proper implementation
-			// guidelines.
-			TokenProvider AccessTokenProvider
-
-			GSSAPI GSSAPIConfig
-		}
-
-		// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
-		// If zero or positive, keep-alives are enabled.
-		// If negative, keep-alives are disabled.
-		KeepAlive time.Duration
-
-		// LocalAddr is the local address to use when dialing an
-		// address. The address must be of a compatible type for the
-		// network being dialed.
-		// If nil, a local address is automatically chosen.
-		LocalAddr net.Addr
-
-		Proxy struct {
-			// Whether or not to use proxy when connecting to the broker
-			// (defaults to false).
-			Enable bool
-			// The proxy dialer to use when proxying is enabled (defaults to nil).
-			Dialer proxy.Dialer
-		}
-	}
-
-	// Metadata is the namespace for metadata management properties used by the
-	// Client, and shared by the Producer/Consumer.
-	Metadata struct {
-		Retry struct {
-			// The total number of times to retry a metadata request when the
-			// cluster is in the middle of a leader election (default 3).
-			Max int
-			// How long to wait for leader election to occur before retrying
-			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
-			Backoff time.Duration
-			// Called to compute backoff time dynamically. Useful for implementing
-			// more sophisticated backoff strategies. This takes precedence over
-			// `Backoff` if set.
-			BackoffFunc func(retries, maxRetries int) time.Duration
-		}
-		// How frequently to refresh the cluster metadata in the background.
-		// Defaults to 10 minutes. Set to 0 to disable. Similar to
-		// `topic.metadata.refresh.interval.ms` in the JVM version.
-		RefreshFrequency time.Duration
-
-		// Whether to maintain a full set of metadata for all topics, or just
-		// the minimal set that has been necessary so far. The full set is simpler
-		// and usually more convenient, but can take up a substantial amount of
-		// memory if you have many topics and partitions. Defaults to true.
-		Full bool
-
-		// How long to wait for a successful metadata response.
-		// Disabled by default which means a metadata request against an unreachable
-		// cluster (all brokers are unreachable or unresponsive) can take up to
-		// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
-		// to fail.
-		Timeout time.Duration
-	}
-
-	// Producer is the namespace for configuration related to producing messages,
-	// used by the Producer.
-	Producer struct {
-		// The maximum permitted size of a message (defaults to 1000000). Should be
-		// set equal to or smaller than the broker's `message.max.bytes`.
-		MaxMessageBytes int
-		// The level of acknowledgement reliability needed from the broker (defaults
-		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
-		// JVM producer.
-		RequiredAcks RequiredAcks
-		// The maximum duration the broker will wait for the receipt of the number of
-		// RequiredAcks (defaults to 10 seconds). This is only relevant when
-		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
-		// millisecond resolution, nanoseconds will be truncated. Equivalent to
-		// the JVM producer's `request.timeout.ms` setting.
-		Timeout time.Duration
-		// The type of compression to use on messages (defaults to no compression).
-		// Similar to `compression.codec` setting of the JVM producer.
-		Compression CompressionCodec
-		// The level of compression to use on messages. The meaning depends
-		// on the actual compression type used and defaults to default compression
-		// level for the codec.
-		CompressionLevel int
-		// Generates partitioners for choosing the partition to send messages to
-		// (defaults to hashing the message key). Similar to the `partitioner.class`
-		// setting for the JVM producer.
-		Partitioner PartitionerConstructor
-		// If enabled, the producer will ensure that exactly one copy of each message is
-		// written.
-		Idempotent bool
-
-		// Return specifies what channels will be populated. If they are set to true,
-		// you must read from the respective channels to prevent deadlock. If,
-		// however, this config is used to create a `SyncProducer`, both must be set
-		// to true and you shall not read from the channels since the producer does
-		// this internally.
-		Return struct {
-			// If enabled, successfully delivered messages will be returned on the
-			// Successes channel (default disabled).
-			Successes bool
-
-			// If enabled, messages that failed to deliver will be returned on the
-			// Errors channel, including error (default enabled).
-			Errors bool
-		}
-
-		// The following config options control how often messages are batched up and
-		// sent to the broker. By default, messages are sent as fast as possible, and
-		// all messages received while the current batch is in-flight are placed
-		// into the subsequent batch.
-		Flush struct {
-			// The best-effort number of bytes needed to trigger a flush. Use the
-			// global sarama.MaxRequestSize to set a hard upper limit.
-			Bytes int
-			// The best-effort number of messages needed to trigger a flush. Use
-			// `MaxMessages` to set a hard upper limit.
-			Messages int
-			// The best-effort frequency of flushes. Equivalent to
-			// `queue.buffering.max.ms` setting of JVM producer.
-			Frequency time.Duration
-			// The maximum number of messages the producer will send in a single
-			// broker request. Defaults to 0 for unlimited. Similar to
-			// `queue.buffering.max.messages` in the JVM producer.
-			MaxMessages int
-		}
-
-		Retry struct {
-			// The total number of times to retry sending a message (default 3).
-			// Similar to the `message.send.max.retries` setting of the JVM producer.
-			Max int
-			// How long to wait for the cluster to settle between retries
-			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
-			// JVM producer.
-			Backoff time.Duration
-			// Called to compute backoff time dynamically. Useful for implementing
-			// more sophisticated backoff strategies. This takes precedence over
-			// `Backoff` if set.
-			BackoffFunc func(retries, maxRetries int) time.Duration
-		}
-
-		// Interceptors to be called when the producer dispatcher reads the
-		// message for the first time. Interceptors allow you to intercept and
-		// possibly mutate the message before it is published to the Kafka
-		// cluster. The *ProducerMessage modified by the first interceptor's
-		// OnSend() is passed to the second interceptor's OnSend(), and so on in
-		// the interceptor chain.
-		Interceptors []ProducerInterceptor
-	}
-
-	// Consumer is the namespace for configuration related to consuming messages,
-	// used by the Consumer.
-	Consumer struct {
-
-		// Group is the namespace for configuring consumer group.
-		Group struct {
-			Session struct {
-				// The timeout used to detect consumer failures when using Kafka's group management facility.
-				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
-				// If no heartbeats are received by the broker before the expiration of this session timeout,
-				// then the broker will remove this consumer from the group and initiate a rebalance.
-				// Note that the value must be in the allowable range as configured in the broker configuration
-				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
-				Timeout time.Duration
-			}
-			Heartbeat struct {
-				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
-				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
-				// to facilitate rebalancing when new consumers join or leave the group.
-				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
-				// higher than 1/3 of that value.
-				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
-				Interval time.Duration
-			}
-			Rebalance struct {
-				// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
-				Strategy BalanceStrategy
-				// The maximum allowed time for each worker to join the group once a rebalance has begun.
-				// This is basically a limit on the amount of time needed for all tasks to flush any pending
-				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
-				// the group, which will cause offset commit failures (default 60s).
-				Timeout time.Duration
-
-				Retry struct {
-					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
-					// the load to assign partitions to each consumer. If the set of consumers changes while
-					// this assignment is taking place the rebalance will fail and retry. This setting controls
-					// the maximum number of attempts before giving up (default 4).
-					Max int
-					// Backoff time between retries during rebalance (default 2s)
-					Backoff time.Duration
-				}
-			}
-			Member struct {
-				// Custom metadata to include when joining the group. The user data for all joined members
-				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
-				// coordinator for the group.
-				UserData []byte
-			}
-		}
-
-		Retry struct {
-			// How long to wait after a failing to read from a partition before
-			// trying again (default 2s).
-			Backoff time.Duration
-			// Called to compute backoff time dynamically. Useful for implementing
-			// more sophisticated backoff strategies. This takes precedence over
-			// `Backoff` if set.
-			BackoffFunc func(retries int) time.Duration
-		}
-
-		// Fetch is the namespace for controlling how many bytes are retrieved by any
-		// given request.
-		Fetch struct {
-			// The minimum number of message bytes to fetch in a request - the broker
-			// will wait until at least this many are available. The default is 1,
-			// as 0 causes the consumer to spin when no messages are available.
-			// Equivalent to the JVM's `fetch.min.bytes`.
-			Min int32
-			// The default number of message bytes to fetch from the broker in each
-			// request (default 1MB). This should be larger than the majority of
-			// your messages, or else the consumer will spend a lot of time
-			// negotiating sizes and not actually consuming. Similar to the JVM's
-			// `fetch.message.max.bytes`.
-			Default int32
-			// The maximum number of message bytes to fetch from the broker in a
-			// single request. Messages larger than this will return
-			// ErrMessageTooLarge and will not be consumable, so you must be sure
-			// this is at least as large as your largest message. Defaults to 0
-			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
-			// global `sarama.MaxResponseSize` still applies.
-			Max int32
-		}
-		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
-		// bytes to become available before it returns fewer than that anyway. The
-		// default is 250ms, since 0 causes the consumer to spin when no events are
-		// available. 100-500ms is a reasonable range for most cases. Kafka only
-		// supports precision up to milliseconds; nanoseconds will be truncated.
-		// Equivalent to the JVM's `fetch.wait.max.ms`.
-		MaxWaitTime time.Duration
-
-		// The maximum amount of time the consumer expects a message takes to
-		// process for the user. If writing to the Messages channel takes longer
-		// than this, that partition will stop fetching more messages until it
-		// can proceed again.
-		// Note that, since the Messages channel is buffered, the actual grace time is
-		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
-		// If a message is not written to the Messages channel between two ticks
-		// of the expiryTicker then a timeout is detected.
-		// Using a ticker instead of a timer to detect timeouts should typically
-		// result in many fewer calls to Timer functions which may result in a
-		// significant performance improvement if many messages are being sent
-		// and timeouts are infrequent.
-		// The disadvantage of using a ticker instead of a timer is that
-		// timeouts will be less accurate. That is, the effective timeout could
-		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
-		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
-		// between two messages being sent may not be recognized as a timeout.
-		MaxProcessingTime time.Duration
-
-		// Return specifies what channels will be populated. If they are set to true,
-		// you must read from them to prevent deadlock.
-		Return struct {
-			// If enabled, any errors that occurred while consuming are returned on
-			// the Errors channel (default disabled).
-			Errors bool
-		}
-
-		// Offsets specifies configuration for how and when to commit consumed
-		// offsets. This currently requires the manual use of an OffsetManager
-		// but will eventually be automated.
-		Offsets struct {
-			// Deprecated: CommitInterval exists for historical compatibility
-			// and should not be used. Please use Consumer.Offsets.AutoCommit
-			CommitInterval time.Duration
-
-			// AutoCommit specifies configuration for automatically committing updated offsets.
-			AutoCommit struct {
-				// Whether or not to auto-commit updated offsets back to the broker.
-				// (default enabled).
-				Enable bool
-
-				// How frequently to commit updated offsets. Ineffective unless
-				// auto-commit is enabled (default 1s)
-				Interval time.Duration
-			}
-
-			// The initial offset to use if no offset was previously committed.
-			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
-			Initial int64
-
-			// The retention duration for committed offsets. If zero, disabled
-			// (in which case the `offsets.retention.minutes` option on the
-			// broker will be used).  Kafka only supports precision up to
-			// milliseconds; nanoseconds will be truncated. Requires Kafka
-			// broker version 0.9.0 or later.
-			// (default is 0: disabled).
-			Retention time.Duration
-
-			Retry struct {
-				// The total number of times to retry failing commit
-				// requests during OffsetManager shutdown (default 3).
-				Max int
-			}
-		}
-
-		// IsolationLevel supports 2 modes:
-		// 	- use `ReadUncommitted` (default) to consume and return all messages in the message channel
-		//	- use `ReadCommitted` to hide messages that are part of an aborted transaction
-		IsolationLevel IsolationLevel
-
-		// Interceptors to be called just before the record is sent to the
-		// messages channel. Interceptors allow you to intercept and possibly
-		// mutate the message before it is returned to the client.
-		// The *ConsumerMessage modified by the first interceptor's OnConsume() is
-		// passed to the second interceptor's OnConsume(), and so on in the
-		// interceptor chain.
-		Interceptors []ConsumerInterceptor
-	}
-
-	// A user-provided string sent with every request to the brokers for logging,
-	// debugging, and auditing purposes. Defaults to "sarama", but you should
-	// probably set it to something specific to your application.
-	ClientID string
-	// A rack identifier for this client. This can be any string value which
-	// indicates where this client is physically located.
-	// It corresponds with the broker config 'broker.rack'
-	RackID string
-	// The number of events to buffer in internal and external channels. This
-	// permits the producer and consumer to continue processing some messages
-	// in the background while user code is working, greatly improving throughput.
-	// Defaults to 256.
-	ChannelBufferSize int
-	// The version of Kafka that Sarama will assume it is running against.
-	// Defaults to the oldest supported stable version. Since Kafka provides
-	// backwards-compatibility, setting it to a version older than you have
-	// will not break anything, although it may prevent you from using the
-	// latest features. Setting it to a version greater than you are actually
-	// running may lead to random breakage.
-	Version KafkaVersion
-	// The registry to define metrics into.
-	// Defaults to a local registry.
-	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
-	// prior to starting Sarama.
-	// See Examples on how to use the metrics registry
-	MetricRegistry metrics.Registry
-}
-
-// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config {
-	c := &Config{}
-
-	c.Admin.Retry.Max = 5
-	c.Admin.Retry.Backoff = 100 * time.Millisecond
-	c.Admin.Timeout = 3 * time.Second
-
-	c.Net.MaxOpenRequests = 5
-	c.Net.DialTimeout = 30 * time.Second
-	c.Net.ReadTimeout = 30 * time.Second
-	c.Net.WriteTimeout = 30 * time.Second
-	c.Net.SASL.Handshake = true
-	c.Net.SASL.Version = SASLHandshakeV0
-
-	c.Metadata.Retry.Max = 3
-	c.Metadata.Retry.Backoff = 250 * time.Millisecond
-	c.Metadata.RefreshFrequency = 10 * time.Minute
-	c.Metadata.Full = true
-
-	c.Producer.MaxMessageBytes = 1000000
-	c.Producer.RequiredAcks = WaitForLocal
-	c.Producer.Timeout = 10 * time.Second
-	c.Producer.Partitioner = NewHashPartitioner
-	c.Producer.Retry.Max = 3
-	c.Producer.Retry.Backoff = 100 * time.Millisecond
-	c.Producer.Return.Errors = true
-	c.Producer.CompressionLevel = CompressionLevelDefault
-
-	c.Consumer.Fetch.Min = 1
-	c.Consumer.Fetch.Default = 1024 * 1024
-	c.Consumer.Retry.Backoff = 2 * time.Second
-	c.Consumer.MaxWaitTime = 250 * time.Millisecond
-	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
-	c.Consumer.Return.Errors = false
-	c.Consumer.Offsets.AutoCommit.Enable = true
-	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
-	c.Consumer.Offsets.Initial = OffsetNewest
-	c.Consumer.Offsets.Retry.Max = 3
-
-	c.Consumer.Group.Session.Timeout = 10 * time.Second
-	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
-	c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
-	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
-	c.Consumer.Group.Rebalance.Retry.Max = 4
-	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
-
-	c.ClientID = defaultClientID
-	c.ChannelBufferSize = 256
-	c.Version = DefaultVersion
-	c.MetricRegistry = metrics.NewRegistry()
-
-	return c
-}
-
-// Validate checks a Config instance. It will return a
-// ConfigurationError if the specified values don't make sense.
-func (c *Config) Validate() error {
-	// some configuration values should be warned on but not fail completely, do those first
-	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
-		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
-	}
-	if !c.Net.SASL.Enable {
-		if c.Net.SASL.User != "" {
-			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
-		}
-		if c.Net.SASL.Password != "" {
-			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
-		}
-	}
-	if c.Producer.RequiredAcks > 1 {
-		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
-	}
-	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
-		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
-	}
-	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
-		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
-	}
-	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
-		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
-	}
-	if c.Producer.Timeout%time.Millisecond != 0 {
-		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
-	}
-	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
-		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
-	}
-	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
-		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
-		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
-		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
-		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
-		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.ClientID == defaultClientID {
-		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
-	}
-
-	// validate Net values
-	switch {
-	case c.Net.MaxOpenRequests <= 0:
-		return ConfigurationError("Net.MaxOpenRequests must be > 0")
-	case c.Net.DialTimeout <= 0:
-		return ConfigurationError("Net.DialTimeout must be > 0")
-	case c.Net.ReadTimeout <= 0:
-		return ConfigurationError("Net.ReadTimeout must be > 0")
-	case c.Net.WriteTimeout <= 0:
-		return ConfigurationError("Net.WriteTimeout must be > 0")
-	case c.Net.SASL.Enable:
-		if c.Net.SASL.Mechanism == "" {
-			c.Net.SASL.Mechanism = SASLTypePlaintext
-		}
-
-		switch c.Net.SASL.Mechanism {
-		case SASLTypePlaintext:
-			if c.Net.SASL.User == "" {
-				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
-			}
-			if c.Net.SASL.Password == "" {
-				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
-			}
-		case SASLTypeOAuth:
-			if c.Net.SASL.TokenProvider == nil {
-				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
-			}
-		case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
-			if c.Net.SASL.User == "" {
-				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
-			}
-			if c.Net.SASL.Password == "" {
-				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
-			}
-			if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
-				return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
-			}
-		case SASLTypeGSSAPI:
-			if c.Net.SASL.GSSAPI.ServiceName == "" {
-				return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
-			}
-
-			if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
-				if c.Net.SASL.GSSAPI.Password == "" {
-					return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
-						"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
-				}
-			} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
-				if c.Net.SASL.GSSAPI.KeyTabPath == "" {
-					return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
-						" and  Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
-				}
-			} else {
-				return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
-			}
-			if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
-				return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
-			}
-			if c.Net.SASL.GSSAPI.Username == "" {
-				return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
-			}
-			if c.Net.SASL.GSSAPI.Realm == "" {
-				return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
-			}
-		default:
-			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
-				SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
-			return ConfigurationError(msg)
-		}
-	}
-
-	// validate the Admin values
-	switch {
-	case c.Admin.Timeout <= 0:
-		return ConfigurationError("Admin.Timeout must be > 0")
-	}
-
-	// validate the Metadata values
-	switch {
-	case c.Metadata.Retry.Max < 0:
-		return ConfigurationError("Metadata.Retry.Max must be >= 0")
-	case c.Metadata.Retry.Backoff < 0:
-		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
-	case c.Metadata.RefreshFrequency < 0:
-		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
-	}
-
-	// validate the Producer values
-	switch {
-	case c.Producer.MaxMessageBytes <= 0:
-		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
-	case c.Producer.RequiredAcks < -1:
-		return ConfigurationError("Producer.RequiredAcks must be >= -1")
-	case c.Producer.Timeout <= 0:
-		return ConfigurationError("Producer.Timeout must be > 0")
-	case c.Producer.Partitioner == nil:
-		return ConfigurationError("Producer.Partitioner must not be nil")
-	case c.Producer.Flush.Bytes < 0:
-		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
-	case c.Producer.Flush.Messages < 0:
-		return ConfigurationError("Producer.Flush.Messages must be >= 0")
-	case c.Producer.Flush.Frequency < 0:
-		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
-	case c.Producer.Flush.MaxMessages < 0:
-		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
-	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
-		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
-	case c.Producer.Retry.Max < 0:
-		return ConfigurationError("Producer.Retry.Max must be >= 0")
-	case c.Producer.Retry.Backoff < 0:
-		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
-	}
-
-	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
-		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
-	}
-
-	if c.Producer.Compression == CompressionGZIP {
-		if c.Producer.CompressionLevel != CompressionLevelDefault {
-			if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
-				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
-			}
-		}
-	}
-
-	if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
-		return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
-	}
-
-	if c.Producer.Idempotent {
-		if !c.Version.IsAtLeast(V0_11_0_0) {
-			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
-		}
-		if c.Producer.Retry.Max == 0 {
-			return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
-		}
-		if c.Producer.RequiredAcks != WaitForAll {
-			return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
-		}
-		if c.Net.MaxOpenRequests > 1 {
-			return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
-		}
-	}
-
-	// validate the Consumer values
-	switch {
-	case c.Consumer.Fetch.Min <= 0:
-		return ConfigurationError("Consumer.Fetch.Min must be > 0")
-	case c.Consumer.Fetch.Default <= 0:
-		return ConfigurationError("Consumer.Fetch.Default must be > 0")
-	case c.Consumer.Fetch.Max < 0:
-		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
-	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
-		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
-	case c.Consumer.MaxProcessingTime <= 0:
-		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
-	case c.Consumer.Retry.Backoff < 0:
-		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
-	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
-		return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
-	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
-		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
-	case c.Consumer.Offsets.Retry.Max < 0:
-		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
-	case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
-		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
-	}
-
-	if c.Consumer.Offsets.CommitInterval != 0 {
-		Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
-			" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
-	}
-
-	// validate IsolationLevel
-	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
-		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
-	}
-
-	// validate the Consumer Group values
-	switch {
-	case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
-		return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
-	case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
-		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
-	case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
-		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
-	case c.Consumer.Group.Rebalance.Strategy == nil:
-		return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
-	case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
-		return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
-	case c.Consumer.Group.Rebalance.Retry.Max < 0:
-		return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
-	case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
-		return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
-	}
-
-	// validate misc shared values
-	switch {
-	case c.ChannelBufferSize < 0:
-		return ConfigurationError("ChannelBufferSize must be >= 0")
-	case !validID.MatchString(c.ClientID):
-		return ConfigurationError("ClientID is invalid")
-	}
-
-	return nil
-}
-
-func (c *Config) getDialer() proxy.Dialer {
-	if c.Net.Proxy.Enable {
-		Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
-		return c.Net.Proxy.Dialer
-	} else {
-		return &net.Dialer{
-			Timeout:   c.Net.DialTimeout,
-			KeepAlive: c.Net.KeepAlive,
-			LocalAddr: c.Net.LocalAddr,
-		}
-	}
-}
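For reference, the defaults and validation rules in the config.go removed above are normally exercised as in the following minimal Go sketch: NewConfig() supplies the defaults listed, a couple of fields are overridden, and Validate() rejects inconsistent values before any client is built. The client ID and the stricter acks setting are illustrative assumptions, not values from this change.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()                     // starts from the defaults listed above
	cfg.ClientID = "my-service"                   // avoids the "default of 'sarama'" warning in Validate
	cfg.Producer.RequiredAcks = sarama.WaitForAll // stricter than the WaitForLocal default

	if err := cfg.Validate(); err != nil { // NewClient also calls Validate; shown explicitly for clarity
		log.Fatalf("invalid sarama config: %v", err)
	}
}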
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
deleted file mode 100644
index f9cd172..0000000
--- a/vendor/github.com/Shopify/sarama/consumer.go
+++ /dev/null
@@ -1,949 +0,0 @@
-package sarama
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// ConsumerMessage encapsulates a Kafka message returned by the consumer.
-type ConsumerMessage struct {
-	Headers        []*RecordHeader // only set if kafka is version 0.11+
-	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
-	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
-
-	Key, Value []byte
-	Topic      string
-	Partition  int32
-	Offset     int64
-}
-
-// ConsumerError is what is provided to the user when an error occurs.
-// It wraps an error and includes the topic and partition.
-type ConsumerError struct {
-	Topic     string
-	Partition int32
-	Err       error
-}
-
-func (ce ConsumerError) Error() string {
-	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
-}
-
-func (ce ConsumerError) Unwrap() error {
-	return ce.Err
-}
-
-// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
-// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
-// when stopping.
-type ConsumerErrors []*ConsumerError
-
-func (ce ConsumerErrors) Error() string {
-	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
-}
-
-// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
-// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
-// scope.
-type Consumer interface {
-	// Topics returns the set of available topics as retrieved from the cluster
-	// metadata. This method is the same as Client.Topics(), and is provided for
-	// convenience.
-	Topics() ([]string, error)
-
-	// Partitions returns the sorted list of all partition IDs for the given topic.
-	// This method is the same as Client.Partitions(), and is provided for convenience.
-	Partitions(topic string) ([]int32, error)
-
-	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
-	// the given offset. It will return an error if this Consumer is already consuming
-	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
-	// or OffsetOldest
-	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
-
-	// HighWaterMarks returns the current high water marks for each topic and partition.
-	// Consistency between partitions is not guaranteed since high water marks are updated separately.
-	HighWaterMarks() map[string]map[int32]int64
-
-	// Close shuts down the consumer. It must be called after all child
-	// PartitionConsumers have already been closed.
-	Close() error
-}
-
-type consumer struct {
-	conf            *Config
-	children        map[string]map[int32]*partitionConsumer
-	brokerConsumers map[*Broker]*brokerConsumer
-	client          Client
-	lock            sync.Mutex
-}
-
-// NewConsumer creates a new consumer using the given broker addresses and configuration.
-func NewConsumer(addrs []string, config *Config) (Consumer, error) {
-	client, err := NewClient(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-	return newConsumer(client)
-}
-
-// NewConsumerFromClient creates a new consumer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-func NewConsumerFromClient(client Client) (Consumer, error) {
-	// For clients passed in by the caller, ensure we don't
-	// call Close() on it.
-	cli := &nopCloserClient{client}
-	return newConsumer(cli)
-}
-
-func newConsumer(client Client) (Consumer, error) {
-	// Check that we are not dealing with a closed Client before processing any other arguments
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	c := &consumer{
-		client:          client,
-		conf:            client.Config(),
-		children:        make(map[string]map[int32]*partitionConsumer),
-		brokerConsumers: make(map[*Broker]*brokerConsumer),
-	}
-
-	return c, nil
-}
-
-func (c *consumer) Close() error {
-	return c.client.Close()
-}
-
-func (c *consumer) Topics() ([]string, error) {
-	return c.client.Topics()
-}
-
-func (c *consumer) Partitions(topic string) ([]int32, error) {
-	return c.client.Partitions(topic)
-}
-
-func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
-	child := &partitionConsumer{
-		consumer:  c,
-		conf:      c.conf,
-		topic:     topic,
-		partition: partition,
-		messages:  make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
-		errors:    make(chan *ConsumerError, c.conf.ChannelBufferSize),
-		feeder:    make(chan *FetchResponse, 1),
-		trigger:   make(chan none, 1),
-		dying:     make(chan none),
-		fetchSize: c.conf.Consumer.Fetch.Default,
-	}
-
-	if err := child.chooseStartingOffset(offset); err != nil {
-		return nil, err
-	}
-
-	var leader *Broker
-	var err error
-	if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
-		return nil, err
-	}
-
-	if err := c.addChild(child); err != nil {
-		return nil, err
-	}
-
-	go withRecover(child.dispatcher)
-	go withRecover(child.responseFeeder)
-
-	child.broker = c.refBrokerConsumer(leader)
-	child.broker.input <- child
-
-	return child, nil
-}
-
-func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	hwms := make(map[string]map[int32]int64)
-	for topic, p := range c.children {
-		hwm := make(map[int32]int64, len(p))
-		for partition, pc := range p {
-			hwm[partition] = pc.HighWaterMarkOffset()
-		}
-		hwms[topic] = hwm
-	}
-
-	return hwms
-}
-
-func (c *consumer) addChild(child *partitionConsumer) error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	topicChildren := c.children[child.topic]
-	if topicChildren == nil {
-		topicChildren = make(map[int32]*partitionConsumer)
-		c.children[child.topic] = topicChildren
-	}
-
-	if topicChildren[child.partition] != nil {
-		return ConfigurationError("That topic/partition is already being consumed")
-	}
-
-	topicChildren[child.partition] = child
-	return nil
-}
-
-func (c *consumer) removeChild(child *partitionConsumer) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	delete(c.children[child.topic], child.partition)
-}
-
-func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	bc := c.brokerConsumers[broker]
-	if bc == nil {
-		bc = c.newBrokerConsumer(broker)
-		c.brokerConsumers[broker] = bc
-	}
-
-	bc.refs++
-
-	return bc
-}
-
-func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	brokerWorker.refs--
-
-	if brokerWorker.refs == 0 {
-		close(brokerWorker.input)
-		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
-			delete(c.brokerConsumers, brokerWorker.broker)
-		}
-	}
-}
-
-func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	delete(c.brokerConsumers, brokerWorker.broker)
-}
-
-// PartitionConsumer
-
-// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
-// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
-// of scope.
-//
-// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
-// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
-// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
-// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
-// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
-// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
-// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
-//
-// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
-// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
-// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
-// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
-// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
-type PartitionConsumer interface {
-	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
-	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
-	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
-	// this before calling Close on the underlying client.
-	AsyncClose()
-
-	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
-	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
-	// the Messages channel when this function is called, you will be competing with Close for messages; consider
-	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
-	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
-	Close() error
-
-	// Messages returns the read channel for the messages that are returned by
-	// the broker.
-	Messages() <-chan *ConsumerMessage
-
-	// Errors returns a read channel of errors that occurred during consuming, if
-	// enabled. By default, errors are logged and not returned over this channel.
-	// If you want to implement any custom error handling, set your config's
-	// Consumer.Return.Errors setting to true, and read from this channel.
-	Errors() <-chan *ConsumerError
-
-	// HighWaterMarkOffset returns the high water mark offset of the partition,
-	// i.e. the offset that will be used for the next message that will be produced.
-	// You can use this to determine how far behind the processing is.
-	HighWaterMarkOffset() int64
-}
-
-type partitionConsumer struct {
-	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-
-	consumer *consumer
-	conf     *Config
-	broker   *brokerConsumer
-	messages chan *ConsumerMessage
-	errors   chan *ConsumerError
-	feeder   chan *FetchResponse
-
-	preferredReadReplica int32
-
-	trigger, dying chan none
-	closeOnce      sync.Once
-	topic          string
-	partition      int32
-	responseResult error
-	fetchSize      int32
-	offset         int64
-	retries        int32
-}
-
-var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
-
-func (child *partitionConsumer) sendError(err error) {
-	cErr := &ConsumerError{
-		Topic:     child.topic,
-		Partition: child.partition,
-		Err:       err,
-	}
-
-	if child.conf.Consumer.Return.Errors {
-		child.errors <- cErr
-	} else {
-		Logger.Println(cErr)
-	}
-}
-
-func (child *partitionConsumer) computeBackoff() time.Duration {
-	if child.conf.Consumer.Retry.BackoffFunc != nil {
-		retries := atomic.AddInt32(&child.retries, 1)
-		return child.conf.Consumer.Retry.BackoffFunc(int(retries))
-	}
-	return child.conf.Consumer.Retry.Backoff
-}
-
-func (child *partitionConsumer) dispatcher() {
-	for range child.trigger {
-		select {
-		case <-child.dying:
-			close(child.trigger)
-		case <-time.After(child.computeBackoff()):
-			if child.broker != nil {
-				child.consumer.unrefBrokerConsumer(child.broker)
-				child.broker = nil
-			}
-
-			Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
-			if err := child.dispatch(); err != nil {
-				child.sendError(err)
-				child.trigger <- none{}
-			}
-		}
-	}
-
-	if child.broker != nil {
-		child.consumer.unrefBrokerConsumer(child.broker)
-	}
-	child.consumer.removeChild(child)
-	close(child.feeder)
-}
-
-func (child *partitionConsumer) preferredBroker() (*Broker, error) {
-	if child.preferredReadReplica >= 0 {
-		broker, err := child.consumer.client.Broker(child.preferredReadReplica)
-		if err == nil {
-			return broker, nil
-		}
-	}
-
-	// if the preferred replica cannot be found, fall back to the leader
-	return child.consumer.client.Leader(child.topic, child.partition)
-}
-
-func (child *partitionConsumer) dispatch() error {
-	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
-		return err
-	}
-
-	broker, err := child.preferredBroker()
-	if err != nil {
-		return err
-	}
-
-	child.broker = child.consumer.refBrokerConsumer(broker)
-
-	child.broker.input <- child
-
-	return nil
-}
-
-func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
-	newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
-	if err != nil {
-		return err
-	}
-	oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
-	if err != nil {
-		return err
-	}
-
-	switch {
-	case offset == OffsetNewest:
-		child.offset = newestOffset
-	case offset == OffsetOldest:
-		child.offset = oldestOffset
-	case offset >= oldestOffset && offset <= newestOffset:
-		child.offset = offset
-	default:
-		return ErrOffsetOutOfRange
-	}
-
-	return nil
-}
-
-func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
-	return child.messages
-}
-
-func (child *partitionConsumer) Errors() <-chan *ConsumerError {
-	return child.errors
-}
-
-func (child *partitionConsumer) AsyncClose() {
-	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
-	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
-	// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
-	// also just close itself)
-	child.closeOnce.Do(func() {
-		close(child.dying)
-	})
-}
-
-func (child *partitionConsumer) Close() error {
-	child.AsyncClose()
-
-	var consumerErrors ConsumerErrors
-	for err := range child.errors {
-		consumerErrors = append(consumerErrors, err)
-	}
-
-	if len(consumerErrors) > 0 {
-		return consumerErrors
-	}
-	return nil
-}
-
-func (child *partitionConsumer) HighWaterMarkOffset() int64 {
-	return atomic.LoadInt64(&child.highWaterMarkOffset)
-}
-
-func (child *partitionConsumer) responseFeeder() {
-	var msgs []*ConsumerMessage
-	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
-	firstAttempt := true
-
-feederLoop:
-	for response := range child.feeder {
-		msgs, child.responseResult = child.parseResponse(response)
-
-		if child.responseResult == nil {
-			atomic.StoreInt32(&child.retries, 0)
-		}
-
-		for i, msg := range msgs {
-			child.interceptors(msg)
-		messageSelect:
-			select {
-			case <-child.dying:
-				child.broker.acks.Done()
-				continue feederLoop
-			case child.messages <- msg:
-				firstAttempt = true
-			case <-expiryTicker.C:
-				if !firstAttempt {
-					child.responseResult = errTimedOut
-					child.broker.acks.Done()
-				remainingLoop:
-					for _, msg = range msgs[i:] {
-						child.interceptors(msg)
-						select {
-						case child.messages <- msg:
-						case <-child.dying:
-							break remainingLoop
-						}
-					}
-					child.broker.input <- child
-					continue feederLoop
-				} else {
-					// current message has not been sent, return to select
-					// statement
-					firstAttempt = false
-					goto messageSelect
-				}
-			}
-		}
-
-		child.broker.acks.Done()
-	}
-
-	expiryTicker.Stop()
-	close(child.messages)
-	close(child.errors)
-}
-
-func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
-	var messages []*ConsumerMessage
-	for _, msgBlock := range msgSet.Messages {
-		for _, msg := range msgBlock.Messages() {
-			offset := msg.Offset
-			timestamp := msg.Msg.Timestamp
-			if msg.Msg.Version >= 1 {
-				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
-				offset += baseOffset
-				if msg.Msg.LogAppendTime {
-					timestamp = msgBlock.Msg.Timestamp
-				}
-			}
-			if offset < child.offset {
-				continue
-			}
-			messages = append(messages, &ConsumerMessage{
-				Topic:          child.topic,
-				Partition:      child.partition,
-				Key:            msg.Msg.Key,
-				Value:          msg.Msg.Value,
-				Offset:         offset,
-				Timestamp:      timestamp,
-				BlockTimestamp: msgBlock.Msg.Timestamp,
-			})
-			child.offset = offset + 1
-		}
-	}
-	if len(messages) == 0 {
-		child.offset++
-	}
-	return messages, nil
-}
-
-func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
-	messages := make([]*ConsumerMessage, 0, len(batch.Records))
-
-	for _, rec := range batch.Records {
-		offset := batch.FirstOffset + rec.OffsetDelta
-		if offset < child.offset {
-			continue
-		}
-		timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
-		if batch.LogAppendTime {
-			timestamp = batch.MaxTimestamp
-		}
-		messages = append(messages, &ConsumerMessage{
-			Topic:     child.topic,
-			Partition: child.partition,
-			Key:       rec.Key,
-			Value:     rec.Value,
-			Offset:    offset,
-			Timestamp: timestamp,
-			Headers:   rec.Headers,
-		})
-		child.offset = offset + 1
-	}
-	if len(messages) == 0 {
-		child.offset++
-	}
-	return messages, nil
-}
-
-func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
-	var (
-		metricRegistry          = child.conf.MetricRegistry
-		consumerBatchSizeMetric metrics.Histogram
-	)
-
-	if metricRegistry != nil {
-		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
-	}
-
-	// If request was throttled and empty we log and return without error
-	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
-		Logger.Printf(
-			"consumer/broker/%d FetchResponse throttled %v\n",
-			child.broker.broker.ID(), response.ThrottleTime)
-		return nil, nil
-	}
-
-	block := response.GetBlock(child.topic, child.partition)
-	if block == nil {
-		return nil, ErrIncompleteResponse
-	}
-
-	if block.Err != ErrNoError {
-		return nil, block.Err
-	}
-
-	nRecs, err := block.numRecords()
-	if err != nil {
-		return nil, err
-	}
-
-	consumerBatchSizeMetric.Update(int64(nRecs))
-
-	child.preferredReadReplica = block.PreferredReadReplica
-
-	if nRecs == 0 {
-		partialTrailingMessage, err := block.isPartial()
-		if err != nil {
-			return nil, err
-		}
-		// We got no messages. If we got a trailing one then we need to ask for more data.
-		// Otherwise we just poll again and wait for one to be produced...
-		if partialTrailingMessage {
-			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
-				// we can't ask for more data, we've hit the configured limit
-				child.sendError(ErrMessageTooLarge)
-				child.offset++ // skip this one so we can keep processing future messages
-			} else {
-				child.fetchSize *= 2
-				// check int32 overflow
-				if child.fetchSize < 0 {
-					child.fetchSize = math.MaxInt32
-				}
-				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
-					child.fetchSize = child.conf.Consumer.Fetch.Max
-				}
-			}
-		}
-
-		return nil, nil
-	}
-
-	// we got messages, reset our fetch size in case it was increased for a previous request
-	child.fetchSize = child.conf.Consumer.Fetch.Default
-	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
-
-	// abortedProducerIDs contains the producer IDs whose messages should be ignored as uncommitted
-	// - a producer ID is added when the partitionConsumer reaches the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
-	// - a producer ID is removed when the partitionConsumer reaches an abort control record, meaning the aborted transaction for that producer is over
-	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
-	abortedTransactions := block.getAbortedTransactions()
-
-	var messages []*ConsumerMessage
-	for _, records := range block.RecordsSet {
-		switch records.recordsType {
-		case legacyRecords:
-			messageSetMessages, err := child.parseMessages(records.MsgSet)
-			if err != nil {
-				return nil, err
-			}
-
-			messages = append(messages, messageSetMessages...)
-		case defaultRecords:
-			// Consume remaining abortedTransaction up to last offset of current batch
-			for _, txn := range abortedTransactions {
-				if txn.FirstOffset > records.RecordBatch.LastOffset() {
-					break
-				}
-				abortedProducerIDs[txn.ProducerID] = struct{}{}
-				// Pop abortedTransactions so that we never add it again
-				abortedTransactions = abortedTransactions[1:]
-			}
-
-			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
-			if err != nil {
-				return nil, err
-			}
-
-			// Parse and commit offset but do not expose messages that are:
-			// - control records
-			// - part of an aborted transaction when set to `ReadCommitted`
-
-			// control record
-			isControl, err := records.isControl()
-			if err != nil {
-				// I don't know why there is this continue in case of error to begin with
-				// Safe bet is to ignore control messages if ReadUncommitted
-				// and block on them in case of error and ReadCommitted
-				if child.conf.Consumer.IsolationLevel == ReadCommitted {
-					return nil, err
-				}
-				continue
-			}
-			if isControl {
-				controlRecord, err := records.getControlRecord()
-				if err != nil {
-					return nil, err
-				}
-
-				if controlRecord.Type == ControlRecordAbort {
-					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
-				}
-				continue
-			}
-
-			// filter aborted transactions
-			if child.conf.Consumer.IsolationLevel == ReadCommitted {
-				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
-				if records.RecordBatch.IsTransactional && isAborted {
-					continue
-				}
-			}
-
-			messages = append(messages, recordBatchMessages...)
-		default:
-			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
-		}
-	}
-
-	return messages, nil
-}
-
-func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
-	for _, interceptor := range child.conf.Consumer.Interceptors {
-		msg.safelyApplyInterceptor(interceptor)
-	}
-}
-
-type brokerConsumer struct {
-	consumer         *consumer
-	broker           *Broker
-	input            chan *partitionConsumer
-	newSubscriptions chan []*partitionConsumer
-	subscriptions    map[*partitionConsumer]none
-	wait             chan none
-	acks             sync.WaitGroup
-	refs             int
-}
-
-func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
-	bc := &brokerConsumer{
-		consumer:         c,
-		broker:           broker,
-		input:            make(chan *partitionConsumer),
-		newSubscriptions: make(chan []*partitionConsumer),
-		wait:             make(chan none),
-		subscriptions:    make(map[*partitionConsumer]none),
-		refs:             0,
-	}
-
-	go withRecover(bc.subscriptionManager)
-	go withRecover(bc.subscriptionConsumer)
-
-	return bc
-}
-
-// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
-// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
-// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
-// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
-// so the main goroutine can block waiting for work if it has none.
-func (bc *brokerConsumer) subscriptionManager() {
-	var buffer []*partitionConsumer
-
-	for {
-		if len(buffer) > 0 {
-			select {
-			case event, ok := <-bc.input:
-				if !ok {
-					goto done
-				}
-				buffer = append(buffer, event)
-			case bc.newSubscriptions <- buffer:
-				buffer = nil
-			case bc.wait <- none{}:
-			}
-		} else {
-			select {
-			case event, ok := <-bc.input:
-				if !ok {
-					goto done
-				}
-				buffer = append(buffer, event)
-			case bc.newSubscriptions <- nil:
-			}
-		}
-	}
-
-done:
-	close(bc.wait)
-	if len(buffer) > 0 {
-		bc.newSubscriptions <- buffer
-	}
-	close(bc.newSubscriptions)
-}
-
-// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
-func (bc *brokerConsumer) subscriptionConsumer() {
-	<-bc.wait // wait for our first piece of work
-
-	for newSubscriptions := range bc.newSubscriptions {
-		bc.updateSubscriptions(newSubscriptions)
-
-		if len(bc.subscriptions) == 0 {
-			// We're about to be shut down or we're about to receive more subscriptions.
-			// Either way, the signal just hasn't propagated to our goroutine yet.
-			<-bc.wait
-			continue
-		}
-
-		response, err := bc.fetchNewMessages()
-		if err != nil {
-			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
-			bc.abort(err)
-			return
-		}
-
-		bc.acks.Add(len(bc.subscriptions))
-		for child := range bc.subscriptions {
-			child.feeder <- response
-		}
-		bc.acks.Wait()
-		bc.handleResponses()
-	}
-}
-
-func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
-	for _, child := range newSubscriptions {
-		bc.subscriptions[child] = none{}
-		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
-	}
-
-	for child := range bc.subscriptions {
-		select {
-		case <-child.dying:
-			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
-			close(child.trigger)
-			delete(bc.subscriptions, child)
-		default:
-			// no-op
-		}
-	}
-}
-
-// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
-func (bc *brokerConsumer) handleResponses() {
-	for child := range bc.subscriptions {
-		result := child.responseResult
-		child.responseResult = nil
-
-		if result == nil {
-			if preferredBroker, err := child.preferredBroker(); err == nil {
-				if bc.broker.ID() != preferredBroker.ID() {
-					// not an error but needs redispatching to consume from the preferred replica
-					child.trigger <- none{}
-					delete(bc.subscriptions, child)
-				}
-			}
-			continue
-		}
-
-		// Discard any replica preference.
-		child.preferredReadReplica = -1
-
-		switch result {
-		case errTimedOut:
-			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
-				bc.broker.ID(), child.topic, child.partition)
-			delete(bc.subscriptions, child)
-		case ErrOffsetOutOfRange:
-			// there's no point in retrying this; it will just fail the same way again
-			// shut it down and force the user to choose what to do
-			child.sendError(result)
-			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
-			close(child.trigger)
-			delete(bc.subscriptions, child)
-		case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
-			// not an error, but does need redispatching
-			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
-				bc.broker.ID(), child.topic, child.partition, result)
-			child.trigger <- none{}
-			delete(bc.subscriptions, child)
-		default:
-			// dunno, tell the user and try redispatching
-			child.sendError(result)
-			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
-				bc.broker.ID(), child.topic, child.partition, result)
-			child.trigger <- none{}
-			delete(bc.subscriptions, child)
-		}
-	}
-}
-
-func (bc *brokerConsumer) abort(err error) {
-	bc.consumer.abandonBrokerConsumer(bc)
-	_ = bc.broker.Close() // we don't care about the error this might return, we already have one
-
-	for child := range bc.subscriptions {
-		child.sendError(err)
-		child.trigger <- none{}
-	}
-
-	for newSubscriptions := range bc.newSubscriptions {
-		if len(newSubscriptions) == 0 {
-			<-bc.wait
-			continue
-		}
-		for _, child := range newSubscriptions {
-			child.sendError(err)
-			child.trigger <- none{}
-		}
-	}
-}
-
-func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
-	request := &FetchRequest{
-		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
-		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
-		request.Version = 1
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
-		request.Version = 2
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
-		request.Version = 3
-		request.MaxBytes = MaxResponseSize
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
-		request.Version = 4
-		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
-		request.Version = 7
-		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
-		// and the epoch to -1 tells the broker not to generate a session ID we're going
-		// to just ignore anyway.
-		request.SessionID = 0
-		request.SessionEpoch = -1
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
-		request.Version = 10
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
-		request.Version = 11
-		request.RackID = bc.consumer.conf.RackID
-	}
-
-	for child := range bc.subscriptions {
-		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
-	}
-
-	return bc.broker.Fetch(request)
-}
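The PartitionConsumer documentation removed above describes the intended usage pattern: read from Messages() and stop via Close() or AsyncClose(). The following is a hedged sketch of that contract; the broker address, topic name, and message count are placeholder assumptions, not values from this change.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close() // runs after the partition consumer is closed below

	pc, err := consumer.ConsumePartition("example-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}

	for i := 0; i < 10; i++ { // consume a handful of messages, then shut down
		msg := <-pc.Messages()
		log.Printf("offset %d: %s", msg.Offset, msg.Value)
	}
	if err := pc.Close(); err != nil { // Close drains Messages and returns any buffered errors
		log.Printf("partition consumer close: %v", err)
	}
}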
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
deleted file mode 100644
index 2bf236a..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_group.go
+++ /dev/null
@@ -1,879 +0,0 @@
-package sarama
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sort"
-	"sync"
-	"time"
-)
-
-// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
-var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
-
-// ConsumerGroup is responsible for dividing up processing of topics and partitions
-// over a collection of processes (the members of the consumer group).
-type ConsumerGroup interface {
-	// Consume joins a cluster of consumers for a given list of topics and
-	// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
-	//
-	// The life-cycle of a session is represented by the following steps:
-	//
-	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
-	//    and are assigned their "fair share" of partitions, aka 'claims'.
-	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
-	//    of the claims and allow any necessary preparation or alteration of state.
-	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
-	//    in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
-	//    from concurrent reads/writes.
-	// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
-	//    parent context is cancelled or when a server-side rebalance cycle is initiated.
-	// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
-	//    to allow the user to perform any final tasks before a rebalance.
-	// 6. Finally, marked offsets are committed one last time before claims are released.
-	//
-	// Please note, that once a rebalance is triggered, sessions must be completed within
-	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
-	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
-	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
-	// commit failures.
-	// This method should be called inside an infinite loop; when a
-	// server-side rebalance happens, the consumer session will need to be
-	// recreated to get the new claims.
-	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
-
-	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
-	// By default, errors are logged and not returned over this channel.
-	// If you want to implement any custom error handling, set your config's
-	// Consumer.Return.Errors setting to true, and read from this channel.
-	Errors() <-chan error
-
-	// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
-	// this function before the object passes out of scope, as it will otherwise leak memory.
-	Close() error
-}
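The Consume life-cycle spelled out in the interface documentation above maps onto the ConsumerGroupHandler callbacks (Setup, per-claim ConsumeClaim goroutines, Cleanup). Below is a minimal, hedged handler sketch under that contract; the group ID, topic, broker address, and protocol version pin are illustrative assumptions, not part of this change.

package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

type exampleHandler struct{}

func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "") // marks the offset of the next message to read
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_2_0 // consumer groups require at least this version (see newConsumerGroup below)

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx := context.Background()
	for { // Consume is called in a loop so the session is recreated after each rebalance
		if err := group.Consume(ctx, []string{"example-topic"}, exampleHandler{}); err != nil {
			log.Fatal(err)
		}
	}
}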
-
-type consumerGroup struct {
-	client Client
-
-	config   *Config
-	consumer Consumer
-	groupID  string
-	memberID string
-	errors   chan error
-
-	lock      sync.Mutex
-	closed    chan none
-	closeOnce sync.Once
-
-	userData []byte
-}
-
-// NewConsumerGroup creates a new consumer group using the given broker addresses and configuration.
-func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
-	client, err := NewClient(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-
-	c, err := newConsumerGroup(groupID, client)
-	if err != nil {
-		_ = client.Close()
-	}
-	return c, err
-}
-
-// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-// PLEASE NOTE: consumer groups can only re-use but not share clients.
-func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
-	// For clients passed in by the caller, ensure we don't
-	// call Close() on it.
-	cli := &nopCloserClient{client}
-	return newConsumerGroup(groupID, cli)
-}
-
-func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
-	config := client.Config()
-	if !config.Version.IsAtLeast(V0_10_2_0) {
-		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
-	}
-
-	consumer, err := NewConsumerFromClient(client)
-	if err != nil {
-		return nil, err
-	}
-
-	return &consumerGroup{
-		client:   client,
-		consumer: consumer,
-		config:   config,
-		groupID:  groupID,
-		errors:   make(chan error, config.ChannelBufferSize),
-		closed:   make(chan none),
-	}, nil
-}
-
-// Errors implements ConsumerGroup.
-func (c *consumerGroup) Errors() <-chan error { return c.errors }
-
-// Close implements ConsumerGroup.
-func (c *consumerGroup) Close() (err error) {
-	c.closeOnce.Do(func() {
-		close(c.closed)
-
-		// leave group
-		if e := c.leave(); e != nil {
-			err = e
-		}
-
-		// drain errors
-		go func() {
-			close(c.errors)
-		}()
-		for e := range c.errors {
-			err = e
-		}
-
-		if e := c.client.Close(); e != nil {
-			err = e
-		}
-	})
-	return
-}
-
-// Consume implements ConsumerGroup.
-func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
-	// Ensure group is not closed
-	select {
-	case <-c.closed:
-		return ErrClosedConsumerGroup
-	default:
-	}
-
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	// Quick exit when no topics are provided
-	if len(topics) == 0 {
-		return fmt.Errorf("no topics provided")
-	}
-
-	// Refresh metadata for requested topics
-	if err := c.client.RefreshMetadata(topics...); err != nil {
-		return err
-	}
-
-	// Init session
-	sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
-	if err == ErrClosedClient {
-		return ErrClosedConsumerGroup
-	} else if err != nil {
-		return err
-	}
-
-	// Loop to check whether the partition count of any topic has changed;
-	// a change will trigger a rebalance by ending the current session.
-	// The goroutine exits with the session, so repeated Consume calls do not accumulate loopCheckPartitionNumbers goroutines.
-	go c.loopCheckPartitionNumbers(topics, sess)
-
-	// Wait for session exit signal
-	<-sess.ctx.Done()
-
-	// Gracefully release session claims
-	return sess.release(true)
-}
-
-func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
-	select {
-	case <-c.closed:
-		return nil, ErrClosedConsumerGroup
-	case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
-	}
-
-	if refreshCoordinator {
-		err := c.client.RefreshCoordinator(c.groupID)
-		if err != nil {
-			return c.retryNewSession(ctx, topics, handler, retries, true)
-		}
-	}
-
-	return c.newSession(ctx, topics, handler, retries-1)
-}
-
-func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
-	coordinator, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		if retries <= 0 {
-			return nil, err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, true)
-	}
-
-	// Join consumer group
-	join, err := c.joinGroupRequest(coordinator, topics)
-	if err != nil {
-		_ = coordinator.Close()
-		return nil, err
-	}
-	switch join.Err {
-	case ErrNoError:
-		c.memberID = join.MemberId
-	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
-		c.memberID = ""
-		return c.newSession(ctx, topics, handler, retries)
-	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
-		if retries <= 0 {
-			return nil, join.Err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, true)
-	case ErrRebalanceInProgress: // retry after backoff
-		if retries <= 0 {
-			return nil, join.Err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, false)
-	default:
-		return nil, join.Err
-	}
-
-	// Prepare distribution plan if we joined as the leader
-	var plan BalanceStrategyPlan
-	if join.LeaderId == join.MemberId {
-		members, err := join.GetMembers()
-		if err != nil {
-			return nil, err
-		}
-
-		plan, err = c.balance(members)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// Sync consumer group
-	groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
-	if err != nil {
-		_ = coordinator.Close()
-		return nil, err
-	}
-	switch groupRequest.Err {
-	case ErrNoError:
-	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
-		c.memberID = ""
-		return c.newSession(ctx, topics, handler, retries)
-	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
-		if retries <= 0 {
-			return nil, groupRequest.Err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, true)
-	case ErrRebalanceInProgress: // retry after backoff
-		if retries <= 0 {
-			return nil, groupRequest.Err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, false)
-	default:
-		return nil, groupRequest.Err
-	}
-
-	// Retrieve and sort claims
-	var claims map[string][]int32
-	if len(groupRequest.MemberAssignment) > 0 {
-		members, err := groupRequest.GetMemberAssignment()
-		if err != nil {
-			return nil, err
-		}
-		claims = members.Topics
-		c.userData = members.UserData
-
-		for _, partitions := range claims {
-			sort.Sort(int32Slice(partitions))
-		}
-	}
-
-	return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
-}
-
-func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
-	req := &JoinGroupRequest{
-		GroupId:        c.groupID,
-		MemberId:       c.memberID,
-		SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
-		ProtocolType:   "consumer",
-	}
-	if c.config.Version.IsAtLeast(V0_10_1_0) {
-		req.Version = 1
-		req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
-	}
-
-	// use static user-data if configured, otherwise use consumer-group userdata from the last sync
-	userData := c.config.Consumer.Group.Member.UserData
-	if len(userData) == 0 {
-		userData = c.userData
-	}
-	meta := &ConsumerGroupMemberMetadata{
-		Topics:   topics,
-		UserData: userData,
-	}
-	strategy := c.config.Consumer.Group.Rebalance.Strategy
-	if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
-		return nil, err
-	}
-
-	return coordinator.JoinGroup(req)
-}
-
-func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
-	req := &SyncGroupRequest{
-		GroupId:      c.groupID,
-		MemberId:     c.memberID,
-		GenerationId: generationID,
-	}
-	strategy := c.config.Consumer.Group.Rebalance.Strategy
-	for memberID, topics := range plan {
-		assignment := &ConsumerGroupMemberAssignment{Topics: topics}
-		userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
-		if err != nil {
-			return nil, err
-		}
-		assignment.UserData = userDataBytes
-		if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
-			return nil, err
-		}
-	}
-	return coordinator.SyncGroup(req)
-}
-
-func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
-	req := &HeartbeatRequest{
-		GroupId:      c.groupID,
-		MemberId:     memberID,
-		GenerationId: generationID,
-	}
-
-	return coordinator.Heartbeat(req)
-}
-
-func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
-	topics := make(map[string][]int32)
-	for _, meta := range members {
-		for _, topic := range meta.Topics {
-			topics[topic] = nil
-		}
-	}
-
-	for topic := range topics {
-		partitions, err := c.client.Partitions(topic)
-		if err != nil {
-			return nil, err
-		}
-		topics[topic] = partitions
-	}
-
-	strategy := c.config.Consumer.Group.Rebalance.Strategy
-	return strategy.Plan(members, topics)
-}
-
-// Leaves the cluster, called by Close.
-func (c *consumerGroup) leave() error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	if c.memberID == "" {
-		return nil
-	}
-
-	coordinator, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		return err
-	}
-
-	resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
-		GroupId:  c.groupID,
-		MemberId: c.memberID,
-	})
-	if err != nil {
-		_ = coordinator.Close()
-		return err
-	}
-
-	// Unset memberID
-	c.memberID = ""
-
-	// Check response
-	switch resp.Err {
-	case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
-		return nil
-	default:
-		return resp.Err
-	}
-}
-
-func (c *consumerGroup) handleError(err error, topic string, partition int32) {
-	if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
-		err = &ConsumerError{
-			Topic:     topic,
-			Partition: partition,
-			Err:       err,
-		}
-	}
-
-	if !c.config.Consumer.Return.Errors {
-		Logger.Println(err)
-		return
-	}
-
-	select {
-	case <-c.closed:
-		// consumer is closed
-		return
-	default:
-	}
-
-	select {
-	case c.errors <- err:
-	default:
-		// no error listener
-	}
-}
-
-func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
-	pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
-	defer session.cancel()
-	defer pause.Stop()
-	var oldTopicToPartitionNum map[string]int
-	var err error
-	if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
-		return
-	}
-	for {
-		if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
-			return
-		} else {
-			for topic, num := range oldTopicToPartitionNum {
-				if newTopicToPartitionNum[topic] != num {
-					return // trigger the end of the session on exit
-				}
-			}
-		}
-		select {
-		case <-pause.C:
-		case <-session.ctx.Done():
-			Logger.Printf("loop check partition number coroutine will exit, topics %s", topics)
-			// if the session was closed elsewhere, exit this goroutine too
-			return
-		case <-c.closed:
-			return
-		}
-	}
-}
-
-func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
-	topicToPartitionNum := make(map[string]int, len(topics))
-	for _, topic := range topics {
-		if partitionNum, err := c.client.Partitions(topic); err != nil {
-			Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
-			return nil, err
-		} else {
-			topicToPartitionNum[topic] = len(partitionNum)
-		}
-	}
-	return topicToPartitionNum, nil
-}
-
-// --------------------------------------------------------------------
-
-// ConsumerGroupSession represents a consumer group member session.
-type ConsumerGroupSession interface {
-	// Claims returns information about the claimed partitions by topic.
-	Claims() map[string][]int32
-
-	// MemberID returns the cluster member ID.
-	MemberID() string
-
-	// GenerationID returns the current generation ID.
-	GenerationID() int32
-
-	// MarkOffset marks the provided offset, alongside a metadata string
-	// that represents the state of the partition consumer at that point in time. The
-	// metadata string can be used by another consumer to restore that state, so it
-	// can resume consumption.
-	//
-	// To follow upstream conventions, you are expected to mark the offset of the
-	// next message to read, not the last message read. Thus, when calling `MarkOffset`
-	// you should typically add one to the offset of the last consumed message.
-	//
-	// Note: calling MarkOffset does not necessarily commit the offset to the backend
-	// store immediately for efficiency reasons, and it may never be committed if
-	// your application crashes. This means that you may end up processing the same
-	// message twice, and your processing should ideally be idempotent.
-	MarkOffset(topic string, partition int32, offset int64, metadata string)
-
-	// Commit the offset to the backend
-	//
-	// Note: calling Commit performs a blocking synchronous operation.
-	Commit()
-
-	// ResetOffset resets to the provided offset, alongside a metadata string that
-	// represents the state of the partition consumer at that point in time. Reset
-	// acts as a counterpart to MarkOffset, the difference being that it allows
-	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
-	// allows incrementing the offset. See MarkOffset for more details.
-	ResetOffset(topic string, partition int32, offset int64, metadata string)
-
-	// MarkMessage marks a message as consumed.
-	MarkMessage(msg *ConsumerMessage, metadata string)
-
-	// Context returns the session context.
-	Context() context.Context
-}
-
-type consumerGroupSession struct {
-	parent       *consumerGroup
-	memberID     string
-	generationID int32
-	handler      ConsumerGroupHandler
-
-	claims  map[string][]int32
-	offsets *offsetManager
-	ctx     context.Context
-	cancel  func()
-
-	waitGroup       sync.WaitGroup
-	releaseOnce     sync.Once
-	hbDying, hbDead chan none
-}
-
-func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
-	// init offset manager
-	offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
-	if err != nil {
-		return nil, err
-	}
-
-	// init context
-	ctx, cancel := context.WithCancel(ctx)
-
-	// init session
-	sess := &consumerGroupSession{
-		parent:       parent,
-		memberID:     memberID,
-		generationID: generationID,
-		handler:      handler,
-		offsets:      offsets,
-		claims:       claims,
-		ctx:          ctx,
-		cancel:       cancel,
-		hbDying:      make(chan none),
-		hbDead:       make(chan none),
-	}
-
-	// start heartbeat loop
-	go sess.heartbeatLoop()
-
-	// create a POM for each claim
-	for topic, partitions := range claims {
-		for _, partition := range partitions {
-			pom, err := offsets.ManagePartition(topic, partition)
-			if err != nil {
-				_ = sess.release(false)
-				return nil, err
-			}
-
-			// handle POM errors
-			go func(topic string, partition int32) {
-				for err := range pom.Errors() {
-					sess.parent.handleError(err, topic, partition)
-				}
-			}(topic, partition)
-		}
-	}
-
-	// perform setup
-	if err := handler.Setup(sess); err != nil {
-		_ = sess.release(true)
-		return nil, err
-	}
-
-	// start consuming
-	for topic, partitions := range claims {
-		for _, partition := range partitions {
-			sess.waitGroup.Add(1)
-
-			go func(topic string, partition int32) {
-				defer sess.waitGroup.Done()
-
-				// cancel the session as soon as the first
-				// goroutine exits
-				defer sess.cancel()
-
-				// consume a single topic/partition, blocking
-				sess.consume(topic, partition)
-			}(topic, partition)
-		}
-	}
-	return sess, nil
-}
-
-func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
-func (s *consumerGroupSession) MemberID() string           { return s.memberID }
-func (s *consumerGroupSession) GenerationID() int32        { return s.generationID }
-
-func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
-	if pom := s.offsets.findPOM(topic, partition); pom != nil {
-		pom.MarkOffset(offset, metadata)
-	}
-}
-
-func (s *consumerGroupSession) Commit() {
-	s.offsets.Commit()
-}
-
-func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
-	if pom := s.offsets.findPOM(topic, partition); pom != nil {
-		pom.ResetOffset(offset, metadata)
-	}
-}
-
-func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
-	s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
-}
-
-func (s *consumerGroupSession) Context() context.Context {
-	return s.ctx
-}
-
-func (s *consumerGroupSession) consume(topic string, partition int32) {
-	// quick exit if rebalance is due
-	select {
-	case <-s.ctx.Done():
-		return
-	case <-s.parent.closed:
-		return
-	default:
-	}
-
-	// get next offset
-	offset := s.parent.config.Consumer.Offsets.Initial
-	if pom := s.offsets.findPOM(topic, partition); pom != nil {
-		offset, _ = pom.NextOffset()
-	}
-
-	// create new claim
-	claim, err := newConsumerGroupClaim(s, topic, partition, offset)
-	if err != nil {
-		s.parent.handleError(err, topic, partition)
-		return
-	}
-
-	// handle errors
-	go func() {
-		for err := range claim.Errors() {
-			s.parent.handleError(err, topic, partition)
-		}
-	}()
-
-	// trigger close when session is done
-	go func() {
-		select {
-		case <-s.ctx.Done():
-		case <-s.parent.closed:
-		}
-		claim.AsyncClose()
-	}()
-
-	// start processing
-	if err := s.handler.ConsumeClaim(s, claim); err != nil {
-		s.parent.handleError(err, topic, partition)
-	}
-
-	// ensure consumer is closed & drained
-	claim.AsyncClose()
-	for _, err := range claim.waitClosed() {
-		s.parent.handleError(err, topic, partition)
-	}
-}
-
-func (s *consumerGroupSession) release(withCleanup bool) (err error) {
-	// signal release, stop heartbeat
-	s.cancel()
-
-	// wait for consumers to exit
-	s.waitGroup.Wait()
-
-	// perform release
-	s.releaseOnce.Do(func() {
-		if withCleanup {
-			if e := s.handler.Cleanup(s); e != nil {
-				s.parent.handleError(e, "", -1)
-				err = e
-			}
-		}
-
-		if e := s.offsets.Close(); e != nil {
-			err = e
-		}
-
-		close(s.hbDying)
-		<-s.hbDead
-	})
-
-	return
-}
-
-func (s *consumerGroupSession) heartbeatLoop() {
-	defer close(s.hbDead)
-	defer s.cancel() // trigger the end of the session on exit
-
-	pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
-	defer pause.Stop()
-
-	retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff)
-	defer retryBackoff.Stop()
-
-	retries := s.parent.config.Metadata.Retry.Max
-	for {
-		coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
-		if err != nil {
-			if retries <= 0 {
-				s.parent.handleError(err, "", -1)
-				return
-			}
-			retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff)
-			select {
-			case <-s.hbDying:
-				return
-			case <-retryBackoff.C:
-				retries--
-			}
-			continue
-		}
-
-		resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
-		if err != nil {
-			_ = coordinator.Close()
-
-			if retries <= 0 {
-				s.parent.handleError(err, "", -1)
-				return
-			}
-
-			retries--
-			continue
-		}
-
-		switch resp.Err {
-		case ErrNoError:
-			retries = s.parent.config.Metadata.Retry.Max
-		case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
-			return
-		default:
-			s.parent.handleError(resp.Err, "", -1)
-			return
-		}
-
-		select {
-		case <-pause.C:
-		case <-s.hbDying:
-			return
-		}
-	}
-}
-
-// --------------------------------------------------------------------
-
-// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
-// It also provides hooks for your consumer group session life-cycle and allows you to
-// trigger logic before or after the consume loop(s).
-//
-// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently,
-// so ensure that all state is safely protected against race conditions.
-type ConsumerGroupHandler interface {
-	// Setup is run at the beginning of a new session, before ConsumeClaim.
-	Setup(ConsumerGroupSession) error
-
-	// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
-	// but before the offsets are committed for the very last time.
-	Cleanup(ConsumerGroupSession) error
-
-	// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
-	// Once the Messages() channel is closed, the Handler must finish its processing
-	// loop and exit.
-	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
-}
-
-// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
-type ConsumerGroupClaim interface {
-	// Topic returns the consumed topic name.
-	Topic() string
-
-	// Partition returns the consumed partition.
-	Partition() int32
-
-	// InitialOffset returns the initial offset that was used as a starting point for this claim.
-	InitialOffset() int64
-
-	// HighWaterMarkOffset returns the high water mark offset of the partition,
-	// i.e. the offset that will be used for the next message that will be produced.
-	// You can use this to determine how far behind the processing is.
-	HighWaterMarkOffset() int64
-
-	// Messages returns the read channel for the messages that are returned by
-	// the broker. The messages channel will be closed when a new rebalance cycle
-	// is due. You must finish processing and mark offsets within
-	// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
-	// re-assigned to another group member.
-	Messages() <-chan *ConsumerMessage
-}
-
-type consumerGroupClaim struct {
-	topic     string
-	partition int32
-	offset    int64
-	PartitionConsumer
-}
-
-func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
-	pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
-	if err == ErrOffsetOutOfRange {
-		offset = sess.parent.config.Consumer.Offsets.Initial
-		pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	go func() {
-		for err := range pcm.Errors() {
-			sess.parent.handleError(err, topic, partition)
-		}
-	}()
-
-	return &consumerGroupClaim{
-		topic:             topic,
-		partition:         partition,
-		offset:            offset,
-		PartitionConsumer: pcm,
-	}, nil
-}
-
-func (c *consumerGroupClaim) Topic() string        { return c.topic }
-func (c *consumerGroupClaim) Partition() int32     { return c.partition }
-func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
-
-// Drains messages and errors, ensures the claim is fully closed.
-func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
-	go func() {
-		for range c.Messages() {
-		}
-	}()
-
-	for err := range c.Errors() {
-		errs = append(errs, err)
-	}
-	return
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
deleted file mode 100644
index 21b11e9..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_group_members.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package sarama
-
-// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
-type ConsumerGroupMemberMetadata struct {
-	Version  int16
-	Topics   []string
-	UserData []byte
-}
-
-func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
-	pe.putInt16(m.Version)
-
-	if err := pe.putStringArray(m.Topics); err != nil {
-		return err
-	}
-
-	if err := pe.putBytes(m.UserData); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
-	if m.Version, err = pd.getInt16(); err != nil {
-		return
-	}
-
-	if m.Topics, err = pd.getStringArray(); err != nil {
-		return
-	}
-
-	if m.UserData, err = pd.getBytes(); err != nil {
-		return
-	}
-
-	return nil
-}
-
-// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
-type ConsumerGroupMemberAssignment struct {
-	Version  int16
-	Topics   map[string][]int32
-	UserData []byte
-}
-
-func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
-	pe.putInt16(m.Version)
-
-	if err := pe.putArrayLength(len(m.Topics)); err != nil {
-		return err
-	}
-
-	for topic, partitions := range m.Topics {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putInt32Array(partitions); err != nil {
-			return err
-		}
-	}
-
-	if err := pe.putBytes(m.UserData); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
-	if m.Version, err = pd.getInt16(); err != nil {
-		return
-	}
-
-	var topicLen int
-	if topicLen, err = pd.getArrayLength(); err != nil {
-		return
-	}
-
-	m.Topics = make(map[string][]int32, topicLen)
-	for i := 0; i < topicLen; i++ {
-		var topic string
-		if topic, err = pd.getString(); err != nil {
-			return
-		}
-		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
-			return
-		}
-	}
-
-	if m.UserData, err = pd.getBytes(); err != nil {
-		return
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
deleted file mode 100644
index 5c18e04..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package sarama
-
-// ConsumerMetadataRequest is used for metadata requests
-type ConsumerMetadataRequest struct {
-	ConsumerGroup string
-}
-
-func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
-	tmp := new(FindCoordinatorRequest)
-	tmp.CoordinatorKey = r.ConsumerGroup
-	tmp.CoordinatorType = CoordinatorGroup
-	return tmp.encode(pe)
-}
-
-func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
-	tmp := new(FindCoordinatorRequest)
-	if err := tmp.decode(pd, version); err != nil {
-		return err
-	}
-	r.ConsumerGroup = tmp.CoordinatorKey
-	return nil
-}
-
-func (r *ConsumerMetadataRequest) key() int16 {
-	return 10
-}
-
-func (r *ConsumerMetadataRequest) version() int16 {
-	return 0
-}
-
-func (r *ConsumerMetadataRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
-	return V0_8_2_0
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
deleted file mode 100644
index 7fe0cf9..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package sarama
-
-import (
-	"net"
-	"strconv"
-)
-
-// ConsumerMetadataResponse holds the response for a consumer group metadata request
-type ConsumerMetadataResponse struct {
-	Err             KError
-	Coordinator     *Broker
-	CoordinatorID   int32  // deprecated: use Coordinator.ID()
-	CoordinatorHost string // deprecated: use Coordinator.Addr()
-	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
-}
-
-func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
-	tmp := new(FindCoordinatorResponse)
-
-	if err := tmp.decode(pd, version); err != nil {
-		return err
-	}
-
-	r.Err = tmp.Err
-
-	r.Coordinator = tmp.Coordinator
-	if tmp.Coordinator == nil {
-		return nil
-	}
-
-	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
-	// backwards compatibility
-	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
-	if err != nil {
-		return err
-	}
-	port, err := strconv.ParseInt(portstr, 10, 32)
-	if err != nil {
-		return err
-	}
-	r.CoordinatorID = r.Coordinator.ID()
-	r.CoordinatorHost = host
-	r.CoordinatorPort = int32(port)
-
-	return nil
-}
-
-func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
-	if r.Coordinator == nil {
-		r.Coordinator = new(Broker)
-		r.Coordinator.id = r.CoordinatorID
-		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
-	}
-
-	tmp := &FindCoordinatorResponse{
-		Version:     0,
-		Err:         r.Err,
-		Coordinator: r.Coordinator,
-	}
-
-	if err := tmp.encode(pe); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *ConsumerMetadataResponse) key() int16 {
-	return 10
-}
-
-func (r *ConsumerMetadataResponse) version() int16 {
-	return 0
-}
-
-func (r *ConsumerMetadataResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
-	return V0_8_2_0
-}
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
deleted file mode 100644
index 46fb044..0000000
--- a/vendor/github.com/Shopify/sarama/create_partitions_request.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package sarama
-
-import "time"
-
-type CreatePartitionsRequest struct {
-	TopicPartitions map[string]*TopicPartition
-	Timeout         time.Duration
-	ValidateOnly    bool
-}
-
-func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
-		return err
-	}
-
-	for topic, partition := range c.TopicPartitions {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := partition.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	pe.putInt32(int32(c.Timeout / time.Millisecond))
-
-	pe.putBool(c.ValidateOnly)
-
-	return nil
-}
-
-func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	c.TopicPartitions = make(map[string]*TopicPartition, n)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		c.TopicPartitions[topic] = new(TopicPartition)
-		if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	c.Timeout = time.Duration(timeout) * time.Millisecond
-
-	if c.ValidateOnly, err = pd.getBool(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *CreatePartitionsRequest) key() int16 {
-	return 37
-}
-
-func (r *CreatePartitionsRequest) version() int16 {
-	return 0
-}
-
-func (r *CreatePartitionsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
-
-type TopicPartition struct {
-	Count      int32
-	Assignment [][]int32
-}
-
-func (t *TopicPartition) encode(pe packetEncoder) error {
-	pe.putInt32(t.Count)
-
-	if len(t.Assignment) == 0 {
-		pe.putInt32(-1)
-		return nil
-	}
-
-	if err := pe.putArrayLength(len(t.Assignment)); err != nil {
-		return err
-	}
-
-	for _, assign := range t.Assignment {
-		if err := pe.putInt32Array(assign); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
-	if t.Count, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	n, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if n <= 0 {
-		return nil
-	}
-	t.Assignment = make([][]int32, n)
-
-	for i := 0; i < int(n); i++ {
-		if t.Assignment[i], err = pd.getInt32Array(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
deleted file mode 100644
index 12ce788..0000000
--- a/vendor/github.com/Shopify/sarama/create_partitions_response.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-type CreatePartitionsResponse struct {
-	ThrottleTime         time.Duration
-	TopicPartitionErrors map[string]*TopicPartitionError
-}
-
-func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
-	if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
-		return err
-	}
-
-	for topic, partitionError := range c.TopicPartitionErrors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := partitionError.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		c.TopicPartitionErrors[topic] = new(TopicPartitionError)
-		if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *CreatePartitionsResponse) key() int16 {
-	return 37
-}
-
-func (r *CreatePartitionsResponse) version() int16 {
-	return 0
-}
-
-func (r *CreatePartitionsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
-
-type TopicPartitionError struct {
-	Err    KError
-	ErrMsg *string
-}
-
-func (t *TopicPartitionError) Error() string {
-	text := t.Err.Error()
-	if t.ErrMsg != nil {
-		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
-	}
-	return text
-}
-
-func (t *TopicPartitionError) encode(pe packetEncoder) error {
-	pe.putInt16(int16(t.Err))
-
-	if err := pe.putNullableString(t.ErrMsg); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	t.Err = KError(kerr)
-
-	if t.ErrMsg, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
deleted file mode 100644
index 287acd0..0000000
--- a/vendor/github.com/Shopify/sarama/create_topics_request.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-type CreateTopicsRequest struct {
-	Version int16
-
-	TopicDetails map[string]*TopicDetail
-	Timeout      time.Duration
-	ValidateOnly bool
-}
-
-func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
-		return err
-	}
-	for topic, detail := range c.TopicDetails {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := detail.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	pe.putInt32(int32(c.Timeout / time.Millisecond))
-
-	if c.Version >= 1 {
-		pe.putBool(c.ValidateOnly)
-	}
-
-	return nil
-}
-
-func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.TopicDetails = make(map[string]*TopicDetail, n)
-
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		c.TopicDetails[topic] = new(TopicDetail)
-		if err = c.TopicDetails[topic].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	c.Timeout = time.Duration(timeout) * time.Millisecond
-
-	if version >= 1 {
-		c.ValidateOnly, err = pd.getBool()
-		if err != nil {
-			return err
-		}
-
-		c.Version = version
-	}
-
-	return nil
-}
-
-func (c *CreateTopicsRequest) key() int16 {
-	return 19
-}
-
-func (c *CreateTopicsRequest) version() int16 {
-	return c.Version
-}
-
-func (r *CreateTopicsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
-	switch c.Version {
-	case 2:
-		return V1_0_0_0
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_10_1_0
-	}
-}
-
-type TopicDetail struct {
-	NumPartitions     int32
-	ReplicationFactor int16
-	ReplicaAssignment map[int32][]int32
-	ConfigEntries     map[string]*string
-}
-
-func (t *TopicDetail) encode(pe packetEncoder) error {
-	pe.putInt32(t.NumPartitions)
-	pe.putInt16(t.ReplicationFactor)
-
-	if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
-		return err
-	}
-	for partition, assignment := range t.ReplicaAssignment {
-		pe.putInt32(partition)
-		if err := pe.putInt32Array(assignment); err != nil {
-			return err
-		}
-	}
-
-	if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
-		return err
-	}
-	for configKey, configValue := range t.ConfigEntries {
-		if err := pe.putString(configKey); err != nil {
-			return err
-		}
-		if err := pe.putNullableString(configValue); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
-	if t.NumPartitions, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if t.ReplicationFactor, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		t.ReplicaAssignment = make(map[int32][]int32, n)
-		for i := 0; i < n; i++ {
-			replica, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
-				return err
-			}
-		}
-	}
-
-	n, err = pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		t.ConfigEntries = make(map[string]*string, n)
-		for i := 0; i < n; i++ {
-			configKey, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
deleted file mode 100644
index 7e1448a..0000000
--- a/vendor/github.com/Shopify/sarama/create_topics_response.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-type CreateTopicsResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	TopicErrors  map[string]*TopicError
-}
-
-func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
-	if c.Version >= 2 {
-		pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
-	}
-
-	if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
-		return err
-	}
-	for topic, topicError := range c.TopicErrors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := topicError.encode(pe, c.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
-	c.Version = version
-
-	if version >= 2 {
-		throttleTime, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-		c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.TopicErrors = make(map[string]*TopicError, n)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		c.TopicErrors[topic] = new(TopicError)
-		if err := c.TopicErrors[topic].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateTopicsResponse) key() int16 {
-	return 19
-}
-
-func (c *CreateTopicsResponse) version() int16 {
-	return c.Version
-}
-
-func (c *CreateTopicsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
-	switch c.Version {
-	case 2:
-		return V1_0_0_0
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_10_1_0
-	}
-}
-
-type TopicError struct {
-	Err    KError
-	ErrMsg *string
-}
-
-func (t *TopicError) Error() string {
-	text := t.Err.Error()
-	if t.ErrMsg != nil {
-		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
-	}
-	return text
-}
-
-func (t *TopicError) encode(pe packetEncoder, version int16) error {
-	pe.putInt16(int16(t.Err))
-
-	if version >= 1 {
-		if err := pe.putNullableString(t.ErrMsg); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
-	kErr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	t.Err = KError(kErr)
-
-	if version >= 1 {
-		if t.ErrMsg, err = pd.getNullableString(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
deleted file mode 100644
index af45fda..0000000
--- a/vendor/github.com/Shopify/sarama/decompress.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"io/ioutil"
-	"sync"
-
-	snappy "github.com/eapache/go-xerial-snappy"
-	"github.com/pierrec/lz4"
-)
-
-var (
-	lz4ReaderPool = sync.Pool{
-		New: func() interface{} {
-			return lz4.NewReader(nil)
-		},
-	}
-
-	gzipReaderPool sync.Pool
-)
-
-func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
-	switch cc {
-	case CompressionNone:
-		return data, nil
-	case CompressionGZIP:
-		var err error
-		reader, ok := gzipReaderPool.Get().(*gzip.Reader)
-		if !ok {
-			reader, err = gzip.NewReader(bytes.NewReader(data))
-		} else {
-			err = reader.Reset(bytes.NewReader(data))
-		}
-
-		if err != nil {
-			return nil, err
-		}
-
-		defer gzipReaderPool.Put(reader)
-
-		return ioutil.ReadAll(reader)
-	case CompressionSnappy:
-		return snappy.Decode(data)
-	case CompressionLZ4:
-		reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
-		if !ok {
-			reader = lz4.NewReader(bytes.NewReader(data))
-		} else {
-			reader.Reset(bytes.NewReader(data))
-		}
-		defer lz4ReaderPool.Put(reader)
-
-		return ioutil.ReadAll(reader)
-	case CompressionZSTD:
-		return zstdDecompress(nil, data)
-	default:
-		return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go
deleted file mode 100644
index 4ac8bbe..0000000
--- a/vendor/github.com/Shopify/sarama/delete_groups_request.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package sarama
-
-type DeleteGroupsRequest struct {
-	Groups []string
-}
-
-func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
-	return pe.putStringArray(r.Groups)
-}
-
-func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Groups, err = pd.getStringArray()
-	return
-}
-
-func (r *DeleteGroupsRequest) key() int16 {
-	return 42
-}
-
-func (r *DeleteGroupsRequest) version() int16 {
-	return 0
-}
-
-func (r *DeleteGroupsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
-	return V1_1_0_0
-}
-
-func (r *DeleteGroupsRequest) AddGroup(group string) {
-	r.Groups = append(r.Groups, group)
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go
deleted file mode 100644
index 5e7b1ed..0000000
--- a/vendor/github.com/Shopify/sarama/delete_groups_response.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-type DeleteGroupsResponse struct {
-	ThrottleTime    time.Duration
-	GroupErrorCodes map[string]KError
-}
-
-func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
-		return err
-	}
-	for groupID, errorCode := range r.GroupErrorCodes {
-		if err := pe.putString(groupID); err != nil {
-			return err
-		}
-		pe.putInt16(int16(errorCode))
-	}
-
-	return nil
-}
-
-func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.GroupErrorCodes = make(map[string]KError, n)
-	for i := 0; i < n; i++ {
-		groupID, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		errorCode, err := pd.getInt16()
-		if err != nil {
-			return err
-		}
-
-		r.GroupErrorCodes[groupID] = KError(errorCode)
-	}
-
-	return nil
-}
-
-func (r *DeleteGroupsResponse) key() int16 {
-	return 42
-}
-
-func (r *DeleteGroupsResponse) version() int16 {
-	return 0
-}
-
-func (r *DeleteGroupsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
-	return V1_1_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go
deleted file mode 100644
index dc106b1..0000000
--- a/vendor/github.com/Shopify/sarama/delete_records_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-import (
-	"sort"
-	"time"
-)
-
-// request message format is:
-// [topic] timeout(int32)
-// where topic is:
-//  name(string) [partition]
-// where partition is:
-//  id(int32) offset(int64)
-
-type DeleteRecordsRequest struct {
-	Topics  map[string]*DeleteRecordsRequestTopic
-	Timeout time.Duration
-}
-
-func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(d.Topics)); err != nil {
-		return err
-	}
-	keys := make([]string, 0, len(d.Topics))
-	for topic := range d.Topics {
-		keys = append(keys, topic)
-	}
-	sort.Strings(keys)
-	for _, topic := range keys {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := d.Topics[topic].encode(pe); err != nil {
-			return err
-		}
-	}
-	pe.putInt32(int32(d.Timeout / time.Millisecond))
-
-	return nil
-}
-
-func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
-		for i := 0; i < n; i++ {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			details := new(DeleteRecordsRequestTopic)
-			if err = details.decode(pd, version); err != nil {
-				return err
-			}
-			d.Topics[topic] = details
-		}
-	}
-
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.Timeout = time.Duration(timeout) * time.Millisecond
-
-	return nil
-}
-
-func (d *DeleteRecordsRequest) key() int16 {
-	return 21
-}
-
-func (d *DeleteRecordsRequest) version() int16 {
-	return 0
-}
-
-func (d *DeleteRecordsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-type DeleteRecordsRequestTopic struct {
-	PartitionOffsets map[int32]int64 // partition => offset
-}
-
-func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
-		return err
-	}
-	keys := make([]int32, 0, len(t.PartitionOffsets))
-	for partition := range t.PartitionOffsets {
-		keys = append(keys, partition)
-	}
-	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
-	for _, partition := range keys {
-		pe.putInt32(partition)
-		pe.putInt64(t.PartitionOffsets[partition])
-	}
-	return nil
-}
-
-func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		t.PartitionOffsets = make(map[int32]int64, n)
-		for i := 0; i < n; i++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			offset, err := pd.getInt64()
-			if err != nil {
-				return err
-			}
-			t.PartitionOffsets[partition] = offset
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go
deleted file mode 100644
index d530b4c..0000000
--- a/vendor/github.com/Shopify/sarama/delete_records_response.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package sarama
-
-import (
-	"sort"
-	"time"
-)
-
-// response message format is:
-// throttleMs(int32) [topic]
-// where topic is:
-//  name(string) [partition]
-// where partition is:
-//  id(int32) low_watermark(int64) error_code(int16)
-
-type DeleteRecordsResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	Topics       map[string]*DeleteRecordsResponseTopic
-}
-
-func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(d.Topics)); err != nil {
-		return err
-	}
-	keys := make([]string, 0, len(d.Topics))
-	for topic := range d.Topics {
-		keys = append(keys, topic)
-	}
-	sort.Strings(keys)
-	for _, topic := range keys {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := d.Topics[topic].encode(pe); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
-	d.Version = version
-
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
-		for i := 0; i < n; i++ {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			details := new(DeleteRecordsResponseTopic)
-			if err = details.decode(pd, version); err != nil {
-				return err
-			}
-			d.Topics[topic] = details
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteRecordsResponse) key() int16 {
-	return 21
-}
-
-func (d *DeleteRecordsResponse) version() int16 {
-	return 0
-}
-
-func (d *DeleteRecordsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-type DeleteRecordsResponseTopic struct {
-	Partitions map[int32]*DeleteRecordsResponsePartition
-}
-
-func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(t.Partitions)); err != nil {
-		return err
-	}
-	keys := make([]int32, 0, len(t.Partitions))
-	for partition := range t.Partitions {
-		keys = append(keys, partition)
-	}
-	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
-	for _, partition := range keys {
-		pe.putInt32(partition)
-		if err := t.Partitions[partition].encode(pe); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
-		for i := 0; i < n; i++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			details := new(DeleteRecordsResponsePartition)
-			if err = details.decode(pd, version); err != nil {
-				return err
-			}
-			t.Partitions[partition] = details
-		}
-	}
-
-	return nil
-}
-
-type DeleteRecordsResponsePartition struct {
-	LowWatermark int64
-	Err          KError
-}
-
-func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
-	pe.putInt64(t.LowWatermark)
-	pe.putInt16(int16(t.Err))
-	return nil
-}
-
-func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
-	lowWatermark, err := pd.getInt64()
-	if err != nil {
-		return err
-	}
-	t.LowWatermark = lowWatermark
-
-	kErr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	t.Err = KError(kErr)
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
deleted file mode 100644
index ba6780a..0000000
--- a/vendor/github.com/Shopify/sarama/delete_topics_request.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package sarama
-
-import "time"
-
-type DeleteTopicsRequest struct {
-	Version int16
-	Topics  []string
-	Timeout time.Duration
-}
-
-func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
-	if err := pe.putStringArray(d.Topics); err != nil {
-		return err
-	}
-	pe.putInt32(int32(d.Timeout / time.Millisecond))
-
-	return nil
-}
-
-func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
-	if d.Topics, err = pd.getStringArray(); err != nil {
-		return err
-	}
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.Timeout = time.Duration(timeout) * time.Millisecond
-	d.Version = version
-	return nil
-}
-
-func (d *DeleteTopicsRequest) key() int16 {
-	return 20
-}
-
-func (d *DeleteTopicsRequest) version() int16 {
-	return d.Version
-}
-
-func (d *DeleteTopicsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_10_1_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
deleted file mode 100644
index 733961a..0000000
--- a/vendor/github.com/Shopify/sarama/delete_topics_response.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package sarama
-
-import "time"
-
-type DeleteTopicsResponse struct {
-	Version         int16
-	ThrottleTime    time.Duration
-	TopicErrorCodes map[string]KError
-}
-
-func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
-	if d.Version >= 1 {
-		pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-	}
-
-	if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
-		return err
-	}
-	for topic, errorCode := range d.TopicErrorCodes {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		pe.putInt16(int16(errorCode))
-	}
-
-	return nil
-}
-
-func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
-	if version >= 1 {
-		throttleTime, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-		d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-		d.Version = version
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	d.TopicErrorCodes = make(map[string]KError, n)
-
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		errorCode, err := pd.getInt16()
-		if err != nil {
-			return err
-		}
-
-		d.TopicErrorCodes[topic] = KError(errorCode)
-	}
-
-	return nil
-}
-
-func (d *DeleteTopicsResponse) key() int16 {
-	return 20
-}
-
-func (d *DeleteTopicsResponse) version() int16 {
-	return d.Version
-}
-
-func (d *DeleteTopicsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_10_1_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
deleted file mode 100644
index 4c34880..0000000
--- a/vendor/github.com/Shopify/sarama/describe_configs_request.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package sarama
-
-type DescribeConfigsRequest struct {
-	Version         int16
-	Resources       []*ConfigResource
-	IncludeSynonyms bool
-}
-
-type ConfigResource struct {
-	Type        ConfigResourceType
-	Name        string
-	ConfigNames []string
-}
-
-func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(r.Resources)); err != nil {
-		return err
-	}
-
-	for _, c := range r.Resources {
-		pe.putInt8(int8(c.Type))
-		if err := pe.putString(c.Name); err != nil {
-			return err
-		}
-
-		if len(c.ConfigNames) == 0 {
-			pe.putInt32(-1)
-			continue
-		}
-		if err := pe.putStringArray(c.ConfigNames); err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 1 {
-		pe.putBool(r.IncludeSynonyms)
-	}
-
-	return nil
-}
-
-func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Resources = make([]*ConfigResource, n)
-
-	for i := 0; i < n; i++ {
-		r.Resources[i] = &ConfigResource{}
-		t, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		r.Resources[i].Type = ConfigResourceType(t)
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		r.Resources[i].Name = name
-
-		confLength, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		if confLength == -1 {
-			continue
-		}
-
-		cfnames := make([]string, confLength)
-		for i := 0; i < confLength; i++ {
-			s, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			cfnames[i] = s
-		}
-		r.Resources[i].ConfigNames = cfnames
-	}
-	r.Version = version
-	if r.Version >= 1 {
-		b, err := pd.getBool()
-		if err != nil {
-			return err
-		}
-		r.IncludeSynonyms = b
-	}
-
-	return nil
-}
-
-func (r *DescribeConfigsRequest) key() int16 {
-	return 32
-}
-
-func (r *DescribeConfigsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeConfigsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V1_1_0_0
-	case 2:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
deleted file mode 100644
index 928f5a5..0000000
--- a/vendor/github.com/Shopify/sarama/describe_configs_response.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-type ConfigSource int8
-
-func (s ConfigSource) String() string {
-	switch s {
-	case SourceUnknown:
-		return "Unknown"
-	case SourceTopic:
-		return "Topic"
-	case SourceDynamicBroker:
-		return "DynamicBroker"
-	case SourceDynamicDefaultBroker:
-		return "DynamicDefaultBroker"
-	case SourceStaticBroker:
-		return "StaticBroker"
-	case SourceDefault:
-		return "Default"
-	}
-	return fmt.Sprintf("Source Invalid: %d", int(s))
-}
-
-const (
-	SourceUnknown ConfigSource = iota
-	SourceTopic
-	SourceDynamicBroker
-	SourceDynamicDefaultBroker
-	SourceStaticBroker
-	SourceDefault
-)
-
-type DescribeConfigsResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	Resources    []*ResourceResponse
-}
-
-type ResourceResponse struct {
-	ErrorCode int16
-	ErrorMsg  string
-	Type      ConfigResourceType
-	Name      string
-	Configs   []*ConfigEntry
-}
-
-type ConfigEntry struct {
-	Name      string
-	Value     string
-	ReadOnly  bool
-	Default   bool
-	Source    ConfigSource
-	Sensitive bool
-	Synonyms  []*ConfigSynonym
-}
-
-type ConfigSynonym struct {
-	ConfigName  string
-	ConfigValue string
-	Source      ConfigSource
-}
-
-func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-	if err = pe.putArrayLength(len(r.Resources)); err != nil {
-		return err
-	}
-
-	for _, c := range r.Resources {
-		if err = c.encode(pe, r.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Resources = make([]*ResourceResponse, n)
-	for i := 0; i < n; i++ {
-		rr := &ResourceResponse{}
-		if err := rr.decode(pd, version); err != nil {
-			return err
-		}
-		r.Resources[i] = rr
-	}
-
-	return nil
-}
-
-func (r *DescribeConfigsResponse) key() int16 {
-	return 32
-}
-
-func (r *DescribeConfigsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeConfigsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V1_0_0_0
-	case 2:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
-
-func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(r.ErrorCode)
-
-	if err = pe.putString(r.ErrorMsg); err != nil {
-		return err
-	}
-
-	pe.putInt8(int8(r.Type))
-
-	if err = pe.putString(r.Name); err != nil {
-		return err
-	}
-
-	if err = pe.putArrayLength(len(r.Configs)); err != nil {
-		return err
-	}
-
-	for _, c := range r.Configs {
-		if err = c.encode(pe, version); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
-	ec, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.ErrorCode = ec
-
-	em, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.ErrorMsg = em
-
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	r.Type = ConfigResourceType(t)
-
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Name = name
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Configs = make([]*ConfigEntry, n)
-	for i := 0; i < n; i++ {
-		c := &ConfigEntry{}
-		if err := c.decode(pd, version); err != nil {
-			return err
-		}
-		r.Configs[i] = c
-	}
-	return nil
-}
-
-func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
-	if err = pe.putString(r.Name); err != nil {
-		return err
-	}
-
-	if err = pe.putString(r.Value); err != nil {
-		return err
-	}
-
-	pe.putBool(r.ReadOnly)
-
-	if version <= 0 {
-		pe.putBool(r.Default)
-		pe.putBool(r.Sensitive)
-	} else {
-		pe.putInt8(int8(r.Source))
-		pe.putBool(r.Sensitive)
-
-		if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
-			return err
-		}
-		for _, c := range r.Synonyms {
-			if err = c.encode(pe, version); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
-func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
-	if version == 0 {
-		r.Source = SourceUnknown
-	}
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Name = name
-
-	value, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Value = value
-
-	read, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-	r.ReadOnly = read
-
-	if version == 0 {
-		defaultB, err := pd.getBool()
-		if err != nil {
-			return err
-		}
-		r.Default = defaultB
-		if defaultB {
-			r.Source = SourceDefault
-		}
-	} else {
-		source, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		r.Source = ConfigSource(source)
-		r.Default = r.Source == SourceDefault
-	}
-
-	sensitive, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-	r.Sensitive = sensitive
-
-	if version > 0 {
-		n, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.Synonyms = make([]*ConfigSynonym, n)
-
-		for i := 0; i < n; i++ {
-			s := &ConfigSynonym{}
-			if err := s.decode(pd, version); err != nil {
-				return err
-			}
-			r.Synonyms[i] = s
-		}
-	}
-	return nil
-}
-
-func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
-	err = pe.putString(c.ConfigName)
-	if err != nil {
-		return err
-	}
-
-	err = pe.putString(c.ConfigValue)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt8(int8(c.Source))
-
-	return nil
-}
-
-func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
-	name, err := pd.getString()
-	if err != nil {
-		return nil
-	}
-	c.ConfigName = name
-
-	value, err := pd.getString()
-	if err != nil {
-		return nil
-	}
-	c.ConfigValue = value
-
-	source, err := pd.getInt8()
-	if err != nil {
-		return nil
-	}
-	c.Source = ConfigSource(source)
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
deleted file mode 100644
index f8962da..0000000
--- a/vendor/github.com/Shopify/sarama/describe_groups_request.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package sarama
-
-type DescribeGroupsRequest struct {
-	Groups []string
-}
-
-func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
-	return pe.putStringArray(r.Groups)
-}
-
-func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Groups, err = pd.getStringArray()
-	return
-}
-
-func (r *DescribeGroupsRequest) key() int16 {
-	return 15
-}
-
-func (r *DescribeGroupsRequest) version() int16 {
-	return 0
-}
-
-func (r *DescribeGroupsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
-
-func (r *DescribeGroupsRequest) AddGroup(group string) {
-	r.Groups = append(r.Groups, group)
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
deleted file mode 100644
index bc242e4..0000000
--- a/vendor/github.com/Shopify/sarama/describe_groups_response.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package sarama
-
-type DescribeGroupsResponse struct {
-	Groups []*GroupDescription
-}
-
-func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(r.Groups)); err != nil {
-		return err
-	}
-
-	for _, groupDescription := range r.Groups {
-		if err := groupDescription.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Groups = make([]*GroupDescription, n)
-	for i := 0; i < n; i++ {
-		r.Groups[i] = new(GroupDescription)
-		if err := r.Groups[i].decode(pd); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeGroupsResponse) key() int16 {
-	return 15
-}
-
-func (r *DescribeGroupsResponse) version() int16 {
-	return 0
-}
-
-func (r *DescribeGroupsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
-
-type GroupDescription struct {
-	Err          KError
-	GroupId      string
-	State        string
-	ProtocolType string
-	Protocol     string
-	Members      map[string]*GroupMemberDescription
-}
-
-func (gd *GroupDescription) encode(pe packetEncoder) error {
-	pe.putInt16(int16(gd.Err))
-
-	if err := pe.putString(gd.GroupId); err != nil {
-		return err
-	}
-	if err := pe.putString(gd.State); err != nil {
-		return err
-	}
-	if err := pe.putString(gd.ProtocolType); err != nil {
-		return err
-	}
-	if err := pe.putString(gd.Protocol); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(gd.Members)); err != nil {
-		return err
-	}
-
-	for memberId, groupMemberDescription := range gd.Members {
-		if err := pe.putString(memberId); err != nil {
-			return err
-		}
-		if err := groupMemberDescription.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	gd.Err = KError(kerr)
-
-	if gd.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-	if gd.State, err = pd.getString(); err != nil {
-		return
-	}
-	if gd.ProtocolType, err = pd.getString(); err != nil {
-		return
-	}
-	if gd.Protocol, err = pd.getString(); err != nil {
-		return
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	gd.Members = make(map[string]*GroupMemberDescription)
-	for i := 0; i < n; i++ {
-		memberId, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		gd.Members[memberId] = new(GroupMemberDescription)
-		if err := gd.Members[memberId].decode(pd); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-type GroupMemberDescription struct {
-	ClientId         string
-	ClientHost       string
-	MemberMetadata   []byte
-	MemberAssignment []byte
-}
-
-func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
-	if err := pe.putString(gmd.ClientId); err != nil {
-		return err
-	}
-	if err := pe.putString(gmd.ClientHost); err != nil {
-		return err
-	}
-	if err := pe.putBytes(gmd.MemberMetadata); err != nil {
-		return err
-	}
-	if err := pe.putBytes(gmd.MemberAssignment); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
-	if gmd.ClientId, err = pd.getString(); err != nil {
-		return
-	}
-	if gmd.ClientHost, err = pd.getString(); err != nil {
-		return
-	}
-	if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
-		return
-	}
-	if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
-		return
-	}
-
-	return nil
-}
-
-func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
-	assignment := new(ConsumerGroupMemberAssignment)
-	err := decode(gmd.MemberAssignment, assignment)
-	return assignment, err
-}
-
-func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
-	metadata := new(ConsumerGroupMemberMetadata)
-	err := decode(gmd.MemberMetadata, metadata)
-	return metadata, err
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
deleted file mode 100644
index c0bf04e..0000000
--- a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package sarama
-
-// DescribeLogDirsRequest is a describe request to get partitions' log size
-type DescribeLogDirsRequest struct {
-	// Versions 0 and 1 are equal
-	// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
-	Version int16
-
-	// If this is an empty array, all topics will be queried
-	DescribeTopics []DescribeLogDirsRequestTopic
-}
-
-// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
-type DescribeLogDirsRequestTopic struct {
-	Topic        string
-	PartitionIDs []int32
-}
-
-func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
-	length := len(r.DescribeTopics)
-	if length == 0 {
-		// In order to query all topics we must send null
-		length = -1
-	}
-
-	if err := pe.putArrayLength(length); err != nil {
-		return err
-	}
-
-	for _, d := range r.DescribeTopics {
-		if err := pe.putString(d.Topic); err != nil {
-			return err
-		}
-
-		if err := pe.putInt32Array(d.PartitionIDs); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == -1 {
-		n = 0
-	}
-
-	topics := make([]DescribeLogDirsRequestTopic, n)
-	for i := 0; i < n; i++ {
-		topics[i] = DescribeLogDirsRequestTopic{}
-
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		topics[i].Topic = topic
-
-		pIDs, err := pd.getInt32Array()
-		if err != nil {
-			return err
-		}
-		topics[i].PartitionIDs = pIDs
-	}
-	r.DescribeTopics = topics
-
-	return nil
-}
-
-func (r *DescribeLogDirsRequest) key() int16 {
-	return 35
-}
-
-func (r *DescribeLogDirsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeLogDirsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
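As the comments above note, an empty DescribeTopics slice is encoded as a null array and asks the broker for every topic's log dirs. A small sketch of both uses, assuming the helper name buildLogDirsRequest is ours:

package example

import "github.com/Shopify/sarama"

// buildLogDirsRequest returns a request for one topic's partitions, or for
// all topics when topic is empty.
func buildLogDirsRequest(topic string, partitions []int32) *sarama.DescribeLogDirsRequest {
	if topic == "" {
		// empty DescribeTopics -> encoded as -1 (null) -> query all topics
		return &sarama.DescribeLogDirsRequest{}
	}
	return &sarama.DescribeLogDirsRequest{
		DescribeTopics: []sarama.DescribeLogDirsRequestTopic{
			{Topic: topic, PartitionIDs: partitions},
		},
	}
}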
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
deleted file mode 100644
index 411da38..0000000
--- a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package sarama
-
-import "time"
-
-type DescribeLogDirsResponse struct {
-	ThrottleTime time.Duration
-
-	// Versions 0 and 1 are equal
-	// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
-	Version int16
-
-	LogDirs []DescribeLogDirsResponseDirMetadata
-}
-
-func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
-		return err
-	}
-
-	for _, dir := range r.LogDirs {
-		if err := dir.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	// Decode array of DescribeLogDirsResponseDirMetadata
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
-	for i := 0; i < n; i++ {
-		dir := DescribeLogDirsResponseDirMetadata{}
-		if err := dir.decode(pd, version); err != nil {
-			return err
-		}
-		r.LogDirs[i] = dir
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponse) key() int16 {
-	return 35
-}
-
-func (r *DescribeLogDirsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeLogDirsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
-
-type DescribeLogDirsResponseDirMetadata struct {
-	ErrorCode KError
-
-	// The absolute log directory path
-	Path   string
-	Topics []DescribeLogDirsResponseTopic
-}
-
-func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.ErrorCode))
-
-	if err := pe.putString(r.Path); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(r.Topics)); err != nil {
-		return err
-	}
-	for _, topic := range r.Topics {
-		if err := topic.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
-	errCode, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.ErrorCode = KError(errCode)
-
-	path, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Path = path
-
-	// Decode array of DescribeLogDirsResponseTopic
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Topics = make([]DescribeLogDirsResponseTopic, n)
-	for i := 0; i < n; i++ {
-		t := DescribeLogDirsResponseTopic{}
-
-		if err := t.decode(pd, version); err != nil {
-			return err
-		}
-
-		r.Topics[i] = t
-	}
-
-	return nil
-}
-
-// DescribeLogDirsResponseTopic contains a topic's partition descriptions
-type DescribeLogDirsResponseTopic struct {
-	Topic      string
-	Partitions []DescribeLogDirsResponsePartition
-}
-
-func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
-	if err := pe.putString(r.Topic); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(r.Partitions)); err != nil {
-		return err
-	}
-	for _, partition := range r.Partitions {
-		if err := partition.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
-	t, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Topic = t
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	r.Partitions = make([]DescribeLogDirsResponsePartition, n)
-	for i := 0; i < n; i++ {
-		p := DescribeLogDirsResponsePartition{}
-		if err := p.decode(pd, version); err != nil {
-			return err
-		}
-		r.Partitions[i] = p
-	}
-
-	return nil
-}
-
-// DescribeLogDirsResponsePartition describes a partition's log directory
-type DescribeLogDirsResponsePartition struct {
-	PartitionID int32
-
-	// The size of the log segments of the partition in bytes.
-	Size int64
-
-	// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
-	// current replica's LEO (if it is the future log for the partition)
-	OffsetLag int64
-
-	// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
-	// the replica in the future.
-	IsTemporary bool
-}
-
-func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
-	pe.putInt32(r.PartitionID)
-	pe.putInt64(r.Size)
-	pe.putInt64(r.OffsetLag)
-	pe.putBool(r.IsTemporary)
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
-	pID, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.PartitionID = pID
-
-	size, err := pd.getInt64()
-	if err != nil {
-		return err
-	}
-	r.Size = size
-
-	lag, err := pd.getInt64()
-	if err != nil {
-		return err
-	}
-	r.OffsetLag = lag
-
-	isTemp, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-	r.IsTemporary = isTemp
-
-	return nil
-}
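The response types above nest log dir -> topic -> partition, so a typical consumer aggregates the per-partition sizes. A sketch, assuming the helper name dirSizes is ours:

package example

import "github.com/Shopify/sarama"

// dirSizes sums the log segment bytes per log directory path, skipping
// directories that reported an error.
func dirSizes(resp *sarama.DescribeLogDirsResponse) map[string]int64 {
	sizes := make(map[string]int64)
	for _, dir := range resp.LogDirs {
		if dir.ErrorCode != sarama.ErrNoError {
			continue
		}
		for _, topic := range dir.Topics {
			for _, p := range topic.Partitions {
				sizes[dir.Path] += p.Size
			}
		}
	}
	return sizes
}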
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
deleted file mode 100644
index b5b5940..0000000
--- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package sarama
-
-// DescribeUserScramCredentialsRequest is a request to get a list of SCRAM user names
-type DescribeUserScramCredentialsRequest struct {
-	// Only version 0 is currently supported
-	Version int16
-
-	// If this is an empty array, all users will be queried
-	DescribeUsers []DescribeUserScramCredentialsRequestUser
-}
-
-// DescribeUserScramCredentialsRequestUser is a describe request about a specific user name
-type DescribeUserScramCredentialsRequestUser struct {
-	Name string
-}
-
-func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error {
-	pe.putCompactArrayLength(len(r.DescribeUsers))
-	for _, d := range r.DescribeUsers {
-		if err := pe.putCompactString(d.Name); err != nil {
-			return err
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == -1 {
-		n = 0
-	}
-
-	r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n)
-	for i := 0; i < n; i++ {
-		r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{}
-		if r.DescribeUsers[i].Name, err = pd.getCompactString(); err != nil {
-			return err
-		}
-		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (r *DescribeUserScramCredentialsRequest) key() int16 {
-	return 50
-}
-
-func (r *DescribeUserScramCredentialsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 {
-	return 2
-}
-
-func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion {
-	return V2_7_0_0
-}
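As with the log-dirs request, an empty DescribeUsers slice queries every SCRAM user. A sketch of filling the request, with scramDescribeRequest being our own helper name:

package example

import "github.com/Shopify/sarama"

// scramDescribeRequest describes the named users, or all users when none are given.
func scramDescribeRequest(users ...string) *sarama.DescribeUserScramCredentialsRequest {
	req := &sarama.DescribeUserScramCredentialsRequest{}
	for _, u := range users {
		req.DescribeUsers = append(req.DescribeUsers,
			sarama.DescribeUserScramCredentialsRequestUser{Name: u})
	}
	return req
}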
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
deleted file mode 100644
index 2656c2f..0000000
--- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package sarama
-
-import "time"
-
-type ScramMechanismType int8
-
-const (
-	SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0
-	SCRAM_MECHANISM_SHA_256                           // 1
-	SCRAM_MECHANISM_SHA_512                           // 2
-)
-
-func (s ScramMechanismType) String() string {
-	switch s {
-	case SCRAM_MECHANISM_SHA_256:
-		return SASLTypeSCRAMSHA256
-	case SCRAM_MECHANISM_SHA_512:
-		return SASLTypeSCRAMSHA512
-	default:
-		return "Unknown"
-	}
-}
-
-type DescribeUserScramCredentialsResponse struct {
-	// Only version 0 is currently supported
-	Version int16
-
-	ThrottleTime time.Duration
-
-	ErrorCode    KError
-	ErrorMessage *string
-
-	Results []*DescribeUserScramCredentialsResult
-}
-
-type DescribeUserScramCredentialsResult struct {
-	User string
-
-	ErrorCode    KError
-	ErrorMessage *string
-
-	CredentialInfos []*UserScramCredentialsResponseInfo
-}
-
-type UserScramCredentialsResponseInfo struct {
-	Mechanism  ScramMechanismType
-	Iterations int32
-}
-
-func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
-	pe.putInt16(int16(r.ErrorCode))
-	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
-		return err
-	}
-
-	pe.putCompactArrayLength(len(r.Results))
-	for _, u := range r.Results {
-		if err := pe.putCompactString(u.User); err != nil {
-			return err
-		}
-		pe.putInt16(int16(u.ErrorCode))
-		if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
-			return err
-		}
-
-		pe.putCompactArrayLength(len(u.CredentialInfos))
-		for _, c := range u.CredentialInfos {
-			pe.putInt8(int8(c.Mechanism))
-			pe.putInt32(c.Iterations)
-			pe.putEmptyTaggedFieldArray()
-		}
-
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.ErrorCode = KError(kerr)
-	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-		return err
-	}
-
-	numUsers, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if numUsers > 0 {
-		r.Results = make([]*DescribeUserScramCredentialsResult, numUsers)
-		for i := 0; i < numUsers; i++ {
-			r.Results[i] = &DescribeUserScramCredentialsResult{}
-			if r.Results[i].User, err = pd.getCompactString(); err != nil {
-				return err
-			}
-
-			errorCode, err := pd.getInt16()
-			if err != nil {
-				return err
-			}
-			r.Results[i].ErrorCode = KError(errorCode)
-			if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-				return err
-			}
-
-			numCredentialInfos, err := pd.getCompactArrayLength()
-			if err != nil {
-				return err
-			}
-
-			r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos)
-			for j := 0; j < numCredentialInfos; j++ {
-				r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{}
-				scramMechanism, err := pd.getInt8()
-				if err != nil {
-					return err
-				}
-				r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism)
-				if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil {
-					return err
-				}
-				if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-					return err
-				}
-			}
-
-			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (r *DescribeUserScramCredentialsResponse) key() int16 {
-	return 50
-}
-
-func (r *DescribeUserScramCredentialsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 {
-	return 2
-}
-
-func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion {
-	return V2_7_0_0
-}
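When reading the response above, the per-user ErrorCode has to be checked before the credential infos are trusted; ScramMechanismType's String method makes the mechanism printable. A sketch, with printScramCredentials being our own helper name:

package example

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// printScramCredentials lists each user's SCRAM mechanisms and iteration counts.
func printScramCredentials(resp *sarama.DescribeUserScramCredentialsResponse) {
	for _, res := range resp.Results {
		if res.ErrorCode != sarama.ErrNoError {
			fmt.Printf("%s: %v\n", res.User, res.ErrorCode)
			continue
		}
		for _, info := range res.CredentialInfos {
			fmt.Printf("%s: %s with %d iterations\n", res.User, info.Mechanism, info.Iterations)
		}
	}
}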
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
deleted file mode 100644
index 7bf9ff9..0000000
--- a/vendor/github.com/Shopify/sarama/dev.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: sarama
-
-up:
-  - go:
-      version: '1.16'
-
-commands:
-  test:
-    run: make test
-    desc: 'run unit tests'
diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml
deleted file mode 100644
index 8e9c24e..0000000
--- a/vendor/github.com/Shopify/sarama/docker-compose.yml
+++ /dev/null
@@ -1,134 +0,0 @@
-version: '3.7'
-services:
-  zookeeper-1:
-    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      ZOOKEEPER_SERVER_ID: '1'
-      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
-      ZOOKEEPER_CLIENT_PORT: '2181'
-      ZOOKEEPER_PEER_PORT: '2888'
-      ZOOKEEPER_LEADER_PORT: '3888'
-      ZOOKEEPER_INIT_LIMIT: '10'
-      ZOOKEEPER_SYNC_LIMIT: '5'
-      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
-  zookeeper-2:
-    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      ZOOKEEPER_SERVER_ID: '2'
-      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
-      ZOOKEEPER_CLIENT_PORT: '2181'
-      ZOOKEEPER_PEER_PORT: '2888'
-      ZOOKEEPER_LEADER_PORT: '3888'
-      ZOOKEEPER_INIT_LIMIT: '10'
-      ZOOKEEPER_SYNC_LIMIT: '5'
-      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
-  zookeeper-3:
-    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      ZOOKEEPER_SERVER_ID: '3'
-      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
-      ZOOKEEPER_CLIENT_PORT: '2181'
-      ZOOKEEPER_PEER_PORT: '2888'
-      ZOOKEEPER_LEADER_PORT: '3888'
-      ZOOKEEPER_INIT_LIMIT: '10'
-      ZOOKEEPER_SYNC_LIMIT: '5'
-      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
-  kafka-1:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '1'
-      KAFKA_BROKER_RACK: '1'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  kafka-2:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '2'
-      KAFKA_BROKER_RACK: '2'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  kafka-3:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '3'
-      KAFKA_BROKER_RACK: '3'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  kafka-4:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '4'
-      KAFKA_BROKER_RACK: '4'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  kafka-5:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '5'
-      KAFKA_BROKER_RACK: '5'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  toxiproxy:
-    image: 'shopify/toxiproxy:2.1.4'
-    ports:
-      # The tests themselves actually start the proxies on these ports
-      - '29091:29091'
-      - '29092:29092'
-      - '29093:29093'
-      - '29094:29094'
-      - '29095:29095'
-      # This is the toxiproxy API port
-      - '8474:8474'
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
deleted file mode 100644
index dab54f8..0000000
--- a/vendor/github.com/Shopify/sarama/encoder_decoder.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// encoder is the interface that wraps the basic encode method.
-// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
-type encoder interface {
-	encode(pe packetEncoder) error
-}
-
-type encoderWithHeader interface {
-	encoder
-	headerVersion() int16
-}
-
-// encode takes an encoder and turns it into bytes while potentially recording metrics.
-func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
-	if e == nil {
-		return nil, nil
-	}
-
-	var prepEnc prepEncoder
-	var realEnc realEncoder
-
-	err := e.encode(&prepEnc)
-	if err != nil {
-		return nil, err
-	}
-
-	if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
-		return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
-	}
-
-	realEnc.raw = make([]byte, prepEnc.length)
-	realEnc.registry = metricRegistry
-	err = e.encode(&realEnc)
-	if err != nil {
-		return nil, err
-	}
-
-	return realEnc.raw, nil
-}
-
-// decoder is the interface that wraps the basic decode method.
-// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
-type decoder interface {
-	decode(pd packetDecoder) error
-}
-
-type versionedDecoder interface {
-	decode(pd packetDecoder, version int16) error
-}
-
-// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
-// interpreted using Kafka's encoding rules.
-func decode(buf []byte, in decoder) error {
-	if buf == nil {
-		return nil
-	}
-
-	helper := realDecoder{raw: buf}
-	err := in.decode(&helper)
-	if err != nil {
-		return err
-	}
-
-	if helper.off != len(buf) {
-		return PacketDecodingError{"invalid length"}
-	}
-
-	return nil
-}
-
-func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
-	if buf == nil {
-		return nil
-	}
-
-	helper := realDecoder{raw: buf}
-	err := in.decode(&helper, version)
-	if err != nil {
-		return err
-	}
-
-	if helper.off != len(buf) {
-		return PacketDecodingError{"invalid length"}
-	}
-
-	return nil
-}
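encode and versionedDecode above are the two halves of sarama's wire handling: prepEncoder sizes the buffer, realEncoder fills it, and the decoder must consume the buffer exactly. A package-internal sketch of that round trip, assuming the hypothetical helper name roundTrip and reusing the DescribeLogDirsRequest shown earlier in this change:

package sarama

import "github.com/rcrowley/go-metrics"

// roundTrip encodes a request and decodes it back at the same protocol
// version, returning the reconstructed copy.
func roundTrip(req *DescribeLogDirsRequest) (*DescribeLogDirsRequest, error) {
	buf, err := encode(req, metrics.NewRegistry())
	if err != nil {
		return nil, err
	}
	decoded := &DescribeLogDirsRequest{}
	if err := versionedDecode(buf, decoded, req.version()); err != nil {
		return nil, err
	}
	return decoded, nil
}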
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
deleted file mode 100644
index 6635425..0000000
--- a/vendor/github.com/Shopify/sarama/end_txn_request.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package sarama
-
-type EndTxnRequest struct {
-	TransactionalID   string
-	ProducerID        int64
-	ProducerEpoch     int16
-	TransactionResult bool
-}
-
-func (a *EndTxnRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(a.TransactionalID); err != nil {
-		return err
-	}
-
-	pe.putInt64(a.ProducerID)
-
-	pe.putInt16(a.ProducerEpoch)
-
-	pe.putBool(a.TransactionResult)
-
-	return nil
-}
-
-func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
-	if a.TransactionalID, err = pd.getString(); err != nil {
-		return err
-	}
-	if a.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-	if a.TransactionResult, err = pd.getBool(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (a *EndTxnRequest) key() int16 {
-	return 26
-}
-
-func (a *EndTxnRequest) version() int16 {
-	return 0
-}
-
-func (a *EndTxnRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *EndTxnRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
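EndTxnRequest is what a transactional producer sends to finish a transaction, and TransactionResult carries the commit/abort decision (in the Kafka protocol, true means commit and false means abort). A sketch of filling it in, with endTxn being our own helper name:

package example

import "github.com/Shopify/sarama"

// endTxn builds the request that commits (commit=true) or aborts a transaction.
func endTxn(txnID string, producerID int64, epoch int16, commit bool) *sarama.EndTxnRequest {
	return &sarama.EndTxnRequest{
		TransactionalID:   txnID,
		ProducerID:        producerID,
		ProducerEpoch:     epoch,
		TransactionResult: commit,
	}
}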
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
deleted file mode 100644
index 7639767..0000000
--- a/vendor/github.com/Shopify/sarama/end_txn_response.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-type EndTxnResponse struct {
-	ThrottleTime time.Duration
-	Err          KError
-}
-
-func (e *EndTxnResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
-	pe.putInt16(int16(e.Err))
-	return nil
-}
-
-func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	e.Err = KError(kerr)
-
-	return nil
-}
-
-func (e *EndTxnResponse) key() int16 {
-	return 25
-}
-
-func (e *EndTxnResponse) version() int16 {
-	return 0
-}
-
-func (e *EndTxnResponse) headerVersion() int16 {
-	return 0
-}
-
-func (e *EndTxnResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
deleted file mode 100644
index 0fca0a3..0000000
--- a/vendor/github.com/Shopify/sarama/errors.go
+++ /dev/null
@@ -1,409 +0,0 @@
-package sarama
-
-import (
-	"errors"
-	"fmt"
-)
-
-// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
-// or otherwise failed to respond.
-var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
-
-// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID.
-var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found")
-
-// ErrClosedClient is the error returned when a method is called on a client that has been closed.
-var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
-
-// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
-// not contain the expected information.
-var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
-
-// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
-// (meaning one outside of the range [0...numPartitions-1]).
-var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
-
-// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
-var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
-
-// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
-var ErrNotConnected = errors.New("kafka: broker not connected")
-
-// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
-// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
-// of the message set.
-var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
-
-// ErrShuttingDown is returned when a producer receives a message during shutdown.
-var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
-
-// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
-var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
-
-// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
-// a RecordBatch.
-var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
-
-// ErrControllerNotAvailable is returned when the server didn't give a correct controller id. The kafka server's
-// version may be lower than 0.10.0.0.
-var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
-
-// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
-// the metadata.
-var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
-
-// ErrUnknownScramMechanism is returned when a user calls AlterUserScramCredentials with an unknown SCRAM mechanism
-var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
-
-// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
-// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
-type PacketEncodingError struct {
-	Info string
-}
-
-func (err PacketEncodingError) Error() string {
-	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
-}
-
-// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
-// This can be a bad CRC or length field, or any other invalid value.
-type PacketDecodingError struct {
-	Info string
-}
-
-func (err PacketDecodingError) Error() string {
-	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
-}
-
-// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
-// when the specified configuration is invalid.
-type ConfigurationError string
-
-func (err ConfigurationError) Error() string {
-	return "kafka: invalid configuration (" + string(err) + ")"
-}
-
-// KError is the type of error that can be returned directly by the Kafka broker.
-// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
-type KError int16
-
-// MultiError is used to contain multiple errors.
-type MultiError struct {
-	Errors *[]error
-}
-
-func (mErr MultiError) Error() string {
-	errString := ""
-	for _, err := range *mErr.Errors {
-		errString += err.Error() + ","
-	}
-	return errString
-}
-
-func (mErr MultiError) PrettyError() string {
-	errString := ""
-	for _, err := range *mErr.Errors {
-		errString += err.Error() + "\n"
-	}
-	return errString
-}
-
-// ErrDeleteRecords is the type of error returned when deleting the required records fails
-type ErrDeleteRecords struct {
-	MultiError
-}
-
-func (err ErrDeleteRecords) Error() string {
-	return "kafka server: failed to delete records " + err.MultiError.Error()
-}
-
-type ErrReassignPartitions struct {
-	MultiError
-}
-
-func (err ErrReassignPartitions) Error() string {
-	return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
-}
-
-// Numeric error codes returned by the Kafka server.
-const (
-	ErrNoError                            KError = 0
-	ErrUnknown                            KError = -1
-	ErrOffsetOutOfRange                   KError = 1
-	ErrInvalidMessage                     KError = 2
-	ErrUnknownTopicOrPartition            KError = 3
-	ErrInvalidMessageSize                 KError = 4
-	ErrLeaderNotAvailable                 KError = 5
-	ErrNotLeaderForPartition              KError = 6
-	ErrRequestTimedOut                    KError = 7
-	ErrBrokerNotAvailable                 KError = 8
-	ErrReplicaNotAvailable                KError = 9
-	ErrMessageSizeTooLarge                KError = 10
-	ErrStaleControllerEpochCode           KError = 11
-	ErrOffsetMetadataTooLarge             KError = 12
-	ErrNetworkException                   KError = 13
-	ErrOffsetsLoadInProgress              KError = 14
-	ErrConsumerCoordinatorNotAvailable    KError = 15
-	ErrNotCoordinatorForConsumer          KError = 16
-	ErrInvalidTopic                       KError = 17
-	ErrMessageSetSizeTooLarge             KError = 18
-	ErrNotEnoughReplicas                  KError = 19
-	ErrNotEnoughReplicasAfterAppend       KError = 20
-	ErrInvalidRequiredAcks                KError = 21
-	ErrIllegalGeneration                  KError = 22
-	ErrInconsistentGroupProtocol          KError = 23
-	ErrInvalidGroupId                     KError = 24
-	ErrUnknownMemberId                    KError = 25
-	ErrInvalidSessionTimeout              KError = 26
-	ErrRebalanceInProgress                KError = 27
-	ErrInvalidCommitOffsetSize            KError = 28
-	ErrTopicAuthorizationFailed           KError = 29
-	ErrGroupAuthorizationFailed           KError = 30
-	ErrClusterAuthorizationFailed         KError = 31
-	ErrInvalidTimestamp                   KError = 32
-	ErrUnsupportedSASLMechanism           KError = 33
-	ErrIllegalSASLState                   KError = 34
-	ErrUnsupportedVersion                 KError = 35
-	ErrTopicAlreadyExists                 KError = 36
-	ErrInvalidPartitions                  KError = 37
-	ErrInvalidReplicationFactor           KError = 38
-	ErrInvalidReplicaAssignment           KError = 39
-	ErrInvalidConfig                      KError = 40
-	ErrNotController                      KError = 41
-	ErrInvalidRequest                     KError = 42
-	ErrUnsupportedForMessageFormat        KError = 43
-	ErrPolicyViolation                    KError = 44
-	ErrOutOfOrderSequenceNumber           KError = 45
-	ErrDuplicateSequenceNumber            KError = 46
-	ErrInvalidProducerEpoch               KError = 47
-	ErrInvalidTxnState                    KError = 48
-	ErrInvalidProducerIDMapping           KError = 49
-	ErrInvalidTransactionTimeout          KError = 50
-	ErrConcurrentTransactions             KError = 51
-	ErrTransactionCoordinatorFenced       KError = 52
-	ErrTransactionalIDAuthorizationFailed KError = 53
-	ErrSecurityDisabled                   KError = 54
-	ErrOperationNotAttempted              KError = 55
-	ErrKafkaStorageError                  KError = 56
-	ErrLogDirNotFound                     KError = 57
-	ErrSASLAuthenticationFailed           KError = 58
-	ErrUnknownProducerID                  KError = 59
-	ErrReassignmentInProgress             KError = 60
-	ErrDelegationTokenAuthDisabled        KError = 61
-	ErrDelegationTokenNotFound            KError = 62
-	ErrDelegationTokenOwnerMismatch       KError = 63
-	ErrDelegationTokenRequestNotAllowed   KError = 64
-	ErrDelegationTokenAuthorizationFailed KError = 65
-	ErrDelegationTokenExpired             KError = 66
-	ErrInvalidPrincipalType               KError = 67
-	ErrNonEmptyGroup                      KError = 68
-	ErrGroupIDNotFound                    KError = 69
-	ErrFetchSessionIDNotFound             KError = 70
-	ErrInvalidFetchSessionEpoch           KError = 71
-	ErrListenerNotFound                   KError = 72
-	ErrTopicDeletionDisabled              KError = 73
-	ErrFencedLeaderEpoch                  KError = 74
-	ErrUnknownLeaderEpoch                 KError = 75
-	ErrUnsupportedCompressionType         KError = 76
-	ErrStaleBrokerEpoch                   KError = 77
-	ErrOffsetNotAvailable                 KError = 78
-	ErrMemberIdRequired                   KError = 79
-	ErrPreferredLeaderNotAvailable        KError = 80
-	ErrGroupMaxSizeReached                KError = 81
-	ErrFencedInstancedId                  KError = 82
-	ErrEligibleLeadersNotAvailable        KError = 83
-	ErrElectionNotNeeded                  KError = 84
-	ErrNoReassignmentInProgress           KError = 85
-	ErrGroupSubscribedToTopic             KError = 86
-	ErrInvalidRecord                      KError = 87
-	ErrUnstableOffsetCommit               KError = 88
-)
-
-func (err KError) Error() string {
-	// Error messages stolen/adapted from
-	// https://kafka.apache.org/protocol#protocol_error_codes
-	switch err {
-	case ErrNoError:
-		return "kafka server: Not an error, why are you printing me?"
-	case ErrUnknown:
-		return "kafka server: Unexpected (unknown?) server error."
-	case ErrOffsetOutOfRange:
-		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
-	case ErrInvalidMessage:
-		return "kafka server: Message contents does not match its CRC."
-	case ErrUnknownTopicOrPartition:
-		return "kafka server: Request was for a topic or partition that does not exist on this broker."
-	case ErrInvalidMessageSize:
-		return "kafka server: The message has a negative size."
-	case ErrLeaderNotAvailable:
-		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
-	case ErrNotLeaderForPartition:
-		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
-	case ErrRequestTimedOut:
-		return "kafka server: Request exceeded the user-specified time limit in the request."
-	case ErrBrokerNotAvailable:
-		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
-	case ErrReplicaNotAvailable:
-		return "kafka server: Replica information not available, one or more brokers are down."
-	case ErrMessageSizeTooLarge:
-		return "kafka server: Message was too large, server rejected it to avoid allocation error."
-	case ErrStaleControllerEpochCode:
-		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
-	case ErrOffsetMetadataTooLarge:
-		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
-	case ErrNetworkException:
-		return "kafka server: The server disconnected before a response was received."
-	case ErrOffsetsLoadInProgress:
-		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
-	case ErrConsumerCoordinatorNotAvailable:
-		return "kafka server: Offset's topic has not yet been created."
-	case ErrNotCoordinatorForConsumer:
-		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
-	case ErrInvalidTopic:
-		return "kafka server: The request attempted to perform an operation on an invalid topic."
-	case ErrMessageSetSizeTooLarge:
-		return "kafka server: The request included message batch larger than the configured segment size on the server."
-	case ErrNotEnoughReplicas:
-		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
-	case ErrNotEnoughReplicasAfterAppend:
-		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
-	case ErrInvalidRequiredAcks:
-		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
-	case ErrIllegalGeneration:
-		return "kafka server: The provided generation id is not the current generation."
-	case ErrInconsistentGroupProtocol:
-		return "kafka server: The provider group protocol type is incompatible with the other members."
-	case ErrInvalidGroupId:
-		return "kafka server: The provided group id was empty."
-	case ErrUnknownMemberId:
-		return "kafka server: The provided member is not known in the current generation."
-	case ErrInvalidSessionTimeout:
-		return "kafka server: The provided session timeout is outside the allowed range."
-	case ErrRebalanceInProgress:
-		return "kafka server: A rebalance for the group is in progress. Please re-join the group."
-	case ErrInvalidCommitOffsetSize:
-		return "kafka server: The provided commit metadata was too large."
-	case ErrTopicAuthorizationFailed:
-		return "kafka server: The client is not authorized to access this topic."
-	case ErrGroupAuthorizationFailed:
-		return "kafka server: The client is not authorized to access this group."
-	case ErrClusterAuthorizationFailed:
-		return "kafka server: The client is not authorized to send this request type."
-	case ErrInvalidTimestamp:
-		return "kafka server: The timestamp of the message is out of acceptable range."
-	case ErrUnsupportedSASLMechanism:
-		return "kafka server: The broker does not support the requested SASL mechanism."
-	case ErrIllegalSASLState:
-		return "kafka server: Request is not valid given the current SASL state."
-	case ErrUnsupportedVersion:
-		return "kafka server: The version of API is not supported."
-	case ErrTopicAlreadyExists:
-		return "kafka server: Topic with this name already exists."
-	case ErrInvalidPartitions:
-		return "kafka server: Number of partitions is invalid."
-	case ErrInvalidReplicationFactor:
-		return "kafka server: Replication-factor is invalid."
-	case ErrInvalidReplicaAssignment:
-		return "kafka server: Replica assignment is invalid."
-	case ErrInvalidConfig:
-		return "kafka server: Configuration is invalid."
-	case ErrNotController:
-		return "kafka server: This is not the correct controller for this cluster."
-	case ErrInvalidRequest:
-		return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
-	case ErrUnsupportedForMessageFormat:
-		return "kafka server: The requested operation is not supported by the message format version."
-	case ErrPolicyViolation:
-		return "kafka server: Request parameters do not satisfy the configured policy."
-	case ErrOutOfOrderSequenceNumber:
-		return "kafka server: The broker received an out of order sequence number."
-	case ErrDuplicateSequenceNumber:
-		return "kafka server: The broker received a duplicate sequence number."
-	case ErrInvalidProducerEpoch:
-		return "kafka server: Producer attempted an operation with an old epoch."
-	case ErrInvalidTxnState:
-		return "kafka server: The producer attempted a transactional operation in an invalid state."
-	case ErrInvalidProducerIDMapping:
-		return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
-	case ErrInvalidTransactionTimeout:
-		return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
-	case ErrConcurrentTransactions:
-		return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
-	case ErrTransactionCoordinatorFenced:
-		return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
-	case ErrTransactionalIDAuthorizationFailed:
-		return "kafka server: Transactional ID authorization failed."
-	case ErrSecurityDisabled:
-		return "kafka server: Security features are disabled."
-	case ErrOperationNotAttempted:
-		return "kafka server: The broker did not attempt to execute this operation."
-	case ErrKafkaStorageError:
-		return "kafka server: Disk error when trying to access log file on the disk."
-	case ErrLogDirNotFound:
-		return "kafka server: The specified log directory is not found in the broker config."
-	case ErrSASLAuthenticationFailed:
-		return "kafka server: SASL Authentication failed."
-	case ErrUnknownProducerID:
-		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
-	case ErrReassignmentInProgress:
-		return "kafka server: A partition reassignment is in progress."
-	case ErrDelegationTokenAuthDisabled:
-		return "kafka server: Delegation Token feature is not enabled."
-	case ErrDelegationTokenNotFound:
-		return "kafka server: Delegation Token is not found on server."
-	case ErrDelegationTokenOwnerMismatch:
-		return "kafka server: Specified Principal is not valid Owner/Renewer."
-	case ErrDelegationTokenRequestNotAllowed:
-		return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
-	case ErrDelegationTokenAuthorizationFailed:
-		return "kafka server: Delegation Token authorization failed."
-	case ErrDelegationTokenExpired:
-		return "kafka server: Delegation Token is expired."
-	case ErrInvalidPrincipalType:
-		return "kafka server: Supplied principalType is not supported."
-	case ErrNonEmptyGroup:
-		return "kafka server: The group is not empty."
-	case ErrGroupIDNotFound:
-		return "kafka server: The group id does not exist."
-	case ErrFetchSessionIDNotFound:
-		return "kafka server: The fetch session ID was not found."
-	case ErrInvalidFetchSessionEpoch:
-		return "kafka server: The fetch session epoch is invalid."
-	case ErrListenerNotFound:
-		return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
-	case ErrTopicDeletionDisabled:
-		return "kafka server: Topic deletion is disabled."
-	case ErrFencedLeaderEpoch:
-		return "kafka server: The leader epoch in the request is older than the epoch on the broker."
-	case ErrUnknownLeaderEpoch:
-		return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
-	case ErrUnsupportedCompressionType:
-		return "kafka server: The requesting client does not support the compression type of given partition."
-	case ErrStaleBrokerEpoch:
-		return "kafka server: Broker epoch has changed"
-	case ErrOffsetNotAvailable:
-		return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
-	case ErrMemberIdRequired:
-		return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
-	case ErrPreferredLeaderNotAvailable:
-		return "kafka server: The preferred leader was not available"
-	case ErrGroupMaxSizeReached:
-		return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
-	case ErrFencedInstancedId:
-		return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
-	case ErrEligibleLeadersNotAvailable:
-		return "kafka server: Eligible topic partition leaders are not available."
-	case ErrElectionNotNeeded:
-		return "kafka server: Leader election not needed for topic partition."
-	case ErrNoReassignmentInProgress:
-		return "kafka server: No partition reassignment is in progress."
-	case ErrGroupSubscribedToTopic:
-		return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it."
-	case ErrInvalidRecord:
-		return "kafka server: This record has failed the validation on broker and hence will be rejected."
-	case ErrUnstableOffsetCommit:
-		return "kafka server: There are unstable offsets that need to be cleared."
-	}
-
-	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
-}
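KError is a plain value type, so broker-side errors surface to callers as comparable constants rather than opaque strings. A sketch of how application code commonly branches on them (classifyKafkaError is our own name; errors.As works here because KError implements the error interface):

package example

import (
	"errors"
	"fmt"

	"github.com/Shopify/sarama"
)

// classifyKafkaError maps a few broker error codes to a human-readable hint.
func classifyKafkaError(err error) string {
	var kerr sarama.KError
	if errors.As(err, &kerr) {
		switch kerr {
		case sarama.ErrUnknownTopicOrPartition:
			return "topic or partition does not exist"
		case sarama.ErrNotLeaderForPartition:
			return "metadata is stale, refresh and retry"
		default:
			return fmt.Sprintf("broker error code %d", int16(kerr))
		}
	}
	return "client-side error"
}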
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
deleted file mode 100644
index f893aef..0000000
--- a/vendor/github.com/Shopify/sarama/fetch_request.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package sarama
-
-type fetchRequestBlock struct {
-	Version            int16
-	currentLeaderEpoch int32
-	fetchOffset        int64
-	logStartOffset     int64
-	maxBytes           int32
-}
-
-func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
-	b.Version = version
-	if b.Version >= 9 {
-		pe.putInt32(b.currentLeaderEpoch)
-	}
-	pe.putInt64(b.fetchOffset)
-	if b.Version >= 5 {
-		pe.putInt64(b.logStartOffset)
-	}
-	pe.putInt32(b.maxBytes)
-	return nil
-}
-
-func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
-	b.Version = version
-	if b.Version >= 9 {
-		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
-			return err
-		}
-	}
-	if b.fetchOffset, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if b.Version >= 5 {
-		if b.logStartOffset, err = pd.getInt64(); err != nil {
-			return err
-		}
-	}
-	if b.maxBytes, err = pd.getInt32(); err != nil {
-		return err
-	}
-	return nil
-}
-
-// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
-// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that.  The KIP is at
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
-type FetchRequest struct {
-	MaxWaitTime  int32
-	MinBytes     int32
-	MaxBytes     int32
-	Version      int16
-	Isolation    IsolationLevel
-	SessionID    int32
-	SessionEpoch int32
-	blocks       map[string]map[int32]*fetchRequestBlock
-	forgotten    map[string][]int32
-	RackID       string
-}
-
-type IsolationLevel int8
-
-const (
-	ReadUncommitted IsolationLevel = iota
-	ReadCommitted
-)
-
-func (r *FetchRequest) encode(pe packetEncoder) (err error) {
-	pe.putInt32(-1) // replica ID is always -1 for clients
-	pe.putInt32(r.MaxWaitTime)
-	pe.putInt32(r.MinBytes)
-	if r.Version >= 3 {
-		pe.putInt32(r.MaxBytes)
-	}
-	if r.Version >= 4 {
-		pe.putInt8(int8(r.Isolation))
-	}
-	if r.Version >= 7 {
-		pe.putInt32(r.SessionID)
-		pe.putInt32(r.SessionEpoch)
-	}
-	err = pe.putArrayLength(len(r.blocks))
-	if err != nil {
-		return err
-	}
-	for topic, blocks := range r.blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(blocks))
-		if err != nil {
-			return err
-		}
-		for partition, block := range blocks {
-			pe.putInt32(partition)
-			err = block.encode(pe, r.Version)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	if r.Version >= 7 {
-		err = pe.putArrayLength(len(r.forgotten))
-		if err != nil {
-			return err
-		}
-		for topic, partitions := range r.forgotten {
-			err = pe.putString(topic)
-			if err != nil {
-				return err
-			}
-			err = pe.putArrayLength(len(partitions))
-			if err != nil {
-				return err
-			}
-			for _, partition := range partitions {
-				pe.putInt32(partition)
-			}
-		}
-	}
-	if r.Version >= 11 {
-		err = pe.putString(r.RackID)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if _, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if r.MinBytes, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if r.Version >= 3 {
-		if r.MaxBytes, err = pd.getInt32(); err != nil {
-			return err
-		}
-	}
-	if r.Version >= 4 {
-		isolation, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		r.Isolation = IsolationLevel(isolation)
-	}
-	if r.Version >= 7 {
-		r.SessionID, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-		r.SessionEpoch, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-	topicCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount == 0 {
-		return nil
-	}
-	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
-	for i := 0; i < topicCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			fetchBlock := &fetchRequestBlock{}
-			if err = fetchBlock.decode(pd, r.Version); err != nil {
-				return err
-			}
-			r.blocks[topic][partition] = fetchBlock
-		}
-	}
-
-	if r.Version >= 7 {
-		forgottenCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.forgotten = make(map[string][]int32)
-		for i := 0; i < forgottenCount; i++ {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			partitionCount, err := pd.getArrayLength()
-			if err != nil {
-				return err
-			}
-			r.forgotten[topic] = make([]int32, partitionCount)
-
-			for j := 0; j < partitionCount; j++ {
-				partition, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-				r.forgotten[topic][j] = partition
-			}
-		}
-	}
-
-	if r.Version >= 11 {
-		r.RackID, err = pd.getString()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *FetchRequest) key() int16 {
-	return 1
-}
-
-func (r *FetchRequest) version() int16 {
-	return r.Version
-}
-
-func (r *FetchRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *FetchRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 0:
-		return MinVersion
-	case 1:
-		return V0_9_0_0
-	case 2:
-		return V0_10_0_0
-	case 3:
-		return V0_10_1_0
-	case 4, 5:
-		return V0_11_0_0
-	case 6:
-		return V1_0_0_0
-	case 7:
-		return V1_1_0_0
-	case 8:
-		return V2_0_0_0
-	case 9, 10:
-		return V2_1_0_0
-	case 11:
-		return V2_3_0_0
-	default:
-		return MaxVersion
-	}
-}
-
-func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
-	}
-
-	if r.Version >= 7 && r.forgotten == nil {
-		r.forgotten = make(map[string][]int32)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
-	}
-
-	tmp := new(fetchRequestBlock)
-	tmp.Version = r.Version
-	tmp.maxBytes = maxBytes
-	tmp.fetchOffset = fetchOffset
-	if r.Version >= 9 {
-		tmp.currentLeaderEpoch = int32(-1)
-	}
-
-	r.blocks[topic][partitionID] = tmp
-}
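AddBlock is the entry point consumers use to register what to fetch: it lazily allocates the per-topic maps and, for newer protocol versions, initialises the leader epoch and forgotten-topics bookkeeping. A sketch of assembling a fetch, assuming a broker at 0.11+ (hence Version 4, per requiredVersion above) and with newFetchRequest being our own helper name:

package example

import "github.com/Shopify/sarama"

// newFetchRequest asks for up to 1 MiB from one partition, waiting at most
// 250 ms for MinBytes of data and reading only committed records.
func newFetchRequest(topic string, partition int32, offset int64) *sarama.FetchRequest {
	req := &sarama.FetchRequest{
		Version:     4,
		MinBytes:    1,
		MaxBytes:    10 << 20, // overall response size cap (used when Version >= 3)
		MaxWaitTime: 250,      // milliseconds
		Isolation:   sarama.ReadCommitted,
	}
	req.AddBlock(topic, partition, offset, 1<<20)
	return req
}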
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
deleted file mode 100644
index 54b8828..0000000
--- a/vendor/github.com/Shopify/sarama/fetch_response.go
+++ /dev/null
@@ -1,548 +0,0 @@
-package sarama
-
-import (
-	"sort"
-	"time"
-)
-
-type AbortedTransaction struct {
-	ProducerID  int64
-	FirstOffset int64
-}
-
-func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
-	if t.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	if t.FirstOffset, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
-	pe.putInt64(t.ProducerID)
-	pe.putInt64(t.FirstOffset)
-
-	return nil
-}
-
-type FetchResponseBlock struct {
-	Err                  KError
-	HighWaterMarkOffset  int64
-	LastStableOffset     int64
-	LogStartOffset       int64
-	AbortedTransactions  []*AbortedTransaction
-	PreferredReadReplica int32
-	Records              *Records // deprecated: use FetchResponseBlock.RecordsSet
-	RecordsSet           []*Records
-	Partial              bool
-}
-
-func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	b.HighWaterMarkOffset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	if version >= 4 {
-		b.LastStableOffset, err = pd.getInt64()
-		if err != nil {
-			return err
-		}
-
-		if version >= 5 {
-			b.LogStartOffset, err = pd.getInt64()
-			if err != nil {
-				return err
-			}
-		}
-
-		numTransact, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		if numTransact >= 0 {
-			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
-		}
-
-		for i := 0; i < numTransact; i++ {
-			transact := new(AbortedTransaction)
-			if err = transact.decode(pd); err != nil {
-				return err
-			}
-			b.AbortedTransactions[i] = transact
-		}
-	}
-
-	if version >= 11 {
-		b.PreferredReadReplica, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	} else {
-		b.PreferredReadReplica = -1
-	}
-
-	recordsSize, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	recordsDecoder, err := pd.getSubset(int(recordsSize))
-	if err != nil {
-		return err
-	}
-
-	b.RecordsSet = []*Records{}
-
-	for recordsDecoder.remaining() > 0 {
-		records := &Records{}
-		if err := records.decode(recordsDecoder); err != nil {
-			// If we have at least one decoded record, this is not an error
-			if err == ErrInsufficientData {
-				if len(b.RecordsSet) == 0 {
-					b.Partial = true
-				}
-				break
-			}
-			return err
-		}
-
-		partial, err := records.isPartial()
-		if err != nil {
-			return err
-		}
-
-		n, err := records.numRecords()
-		if err != nil {
-			return err
-		}
-
-		if n > 0 || (partial && len(b.RecordsSet) == 0) {
-			b.RecordsSet = append(b.RecordsSet, records)
-
-			if b.Records == nil {
-				b.Records = records
-			}
-		}
-
-		overflow, err := records.isOverflow()
-		if err != nil {
-			return err
-		}
-
-		if partial || overflow {
-			break
-		}
-	}
-
-	return nil
-}
-
-func (b *FetchResponseBlock) numRecords() (int, error) {
-	sum := 0
-
-	for _, records := range b.RecordsSet {
-		count, err := records.numRecords()
-		if err != nil {
-			return 0, err
-		}
-
-		sum += count
-	}
-
-	return sum, nil
-}
-
-func (b *FetchResponseBlock) isPartial() (bool, error) {
-	if b.Partial {
-		return true, nil
-	}
-
-	if len(b.RecordsSet) == 1 {
-		return b.RecordsSet[0].isPartial()
-	}
-
-	return false, nil
-}
-
-func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(b.Err))
-
-	pe.putInt64(b.HighWaterMarkOffset)
-
-	if version >= 4 {
-		pe.putInt64(b.LastStableOffset)
-
-		if version >= 5 {
-			pe.putInt64(b.LogStartOffset)
-		}
-
-		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
-			return err
-		}
-		for _, transact := range b.AbortedTransactions {
-			if err = transact.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	if version >= 11 {
-		pe.putInt32(b.PreferredReadReplica)
-	}
-
-	pe.push(&lengthField{})
-	for _, records := range b.RecordsSet {
-		err = records.encode(pe)
-		if err != nil {
-			return err
-		}
-	}
-	return pe.pop()
-}
-
-func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
-	// I can't find any doc that guarantees the field `fetchResponse.AbortedTransactions` is ordered,
-	// and the Java implementation uses a PriorityQueue based on `FirstOffset`, so we have to order it ourselves.
-	at := b.AbortedTransactions
-	sort.Slice(
-		at,
-		func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
-	)
-	return at
-}
-
-type FetchResponse struct {
-	Blocks        map[string]map[int32]*FetchResponseBlock
-	ThrottleTime  time.Duration
-	ErrorCode     int16
-	SessionID     int32
-	Version       int16
-	LogAppendTime bool
-	Timestamp     time.Time
-}
-
-func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.Version >= 1 {
-		throttle, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
-	}
-
-	if r.Version >= 7 {
-		r.ErrorCode, err = pd.getInt16()
-		if err != nil {
-			return err
-		}
-		r.SessionID, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(FetchResponseBlock)
-			err = block.decode(pd, version)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}
-
-func (r *FetchResponse) encode(pe packetEncoder) (err error) {
-	if r.Version >= 1 {
-		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-	}
-
-	if r.Version >= 7 {
-		pe.putInt16(r.ErrorCode)
-		pe.putInt32(r.SessionID)
-	}
-
-	err = pe.putArrayLength(len(r.Blocks))
-	if err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.Blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-
-		for id, block := range partitions {
-			pe.putInt32(id)
-			err = block.encode(pe, r.Version)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *FetchResponse) key() int16 {
-	return 1
-}
-
-func (r *FetchResponse) version() int16 {
-	return r.Version
-}
-
-func (r *FetchResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *FetchResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 0:
-		return MinVersion
-	case 1:
-		return V0_9_0_0
-	case 2:
-		return V0_10_0_0
-	case 3:
-		return V0_10_1_0
-	case 4, 5:
-		return V0_11_0_0
-	case 6:
-		return V1_0_0_0
-	case 7:
-		return V1_1_0_0
-	case 8:
-		return V2_0_0_0
-	case 9, 10:
-		return V2_1_0_0
-	case 11:
-		return V2_3_0_0
-	default:
-		return MaxVersion
-	}
-}
-
-func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
-	}
-	partitions, ok := r.Blocks[topic]
-	if !ok {
-		partitions = make(map[int32]*FetchResponseBlock)
-		r.Blocks[topic] = partitions
-	}
-	frb, ok := partitions[partition]
-	if !ok {
-		frb = new(FetchResponseBlock)
-		partitions[partition] = frb
-	}
-	frb.Err = err
-}
-
-func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
-	}
-	partitions, ok := r.Blocks[topic]
-	if !ok {
-		partitions = make(map[int32]*FetchResponseBlock)
-		r.Blocks[topic] = partitions
-	}
-	frb, ok := partitions[partition]
-	if !ok {
-		frb = new(FetchResponseBlock)
-		partitions[partition] = frb
-	}
-
-	return frb
-}
-
-func encodeKV(key, value Encoder) ([]byte, []byte) {
-	var kb []byte
-	var vb []byte
-	if key != nil {
-		kb, _ = key.Encode()
-	}
-	if value != nil {
-		vb, _ = value.Encode()
-	}
-
-	return kb, vb
-}
-
-func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
-	frb := r.getOrCreateBlock(topic, partition)
-	kb, vb := encodeKV(key, value)
-	if r.LogAppendTime {
-		timestamp = r.Timestamp
-	}
-	msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
-	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
-	if len(frb.RecordsSet) == 0 {
-		records := newLegacyRecords(&MessageSet{})
-		frb.RecordsSet = []*Records{&records}
-	}
-	set := frb.RecordsSet[0].MsgSet
-	set.Messages = append(set.Messages, msgBlock)
-}
-
-func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
-	frb := r.getOrCreateBlock(topic, partition)
-	kb, vb := encodeKV(key, value)
-	if len(frb.RecordsSet) == 0 {
-		records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
-		frb.RecordsSet = []*Records{&records}
-	}
-	batch := frb.RecordsSet[0].RecordBatch
-	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
-	batch.addRecord(rec)
-}
-
-// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp,
-// but instead of appending 1 record to an existing batch, it appends a new batch containing 1 record to the fetchResponse.
-// Since transactions are handled at the batch level (the whole batch is either committed or aborted), use this to test transactions.
-func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
-	frb := r.getOrCreateBlock(topic, partition)
-	kb, vb := encodeKV(key, value)
-
-	records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
-	batch := &RecordBatch{
-		Version:         2,
-		LogAppendTime:   r.LogAppendTime,
-		FirstTimestamp:  timestamp,
-		MaxTimestamp:    r.Timestamp,
-		FirstOffset:     offset,
-		LastOffsetDelta: 0,
-		ProducerID:      producerID,
-		IsTransactional: isTransactional,
-	}
-	rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
-	batch.addRecord(rec)
-	records.RecordBatch = batch
-
-	frb.RecordsSet = append(frb.RecordsSet, &records)
-}
-
-func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
-	frb := r.getOrCreateBlock(topic, partition)
-
-	// batch
-	batch := &RecordBatch{
-		Version:         2,
-		LogAppendTime:   r.LogAppendTime,
-		FirstTimestamp:  timestamp,
-		MaxTimestamp:    r.Timestamp,
-		FirstOffset:     offset,
-		LastOffsetDelta: 0,
-		ProducerID:      producerID,
-		IsTransactional: true,
-		Control:         true,
-	}
-
-	// records
-	records := newDefaultRecords(nil)
-	records.RecordBatch = batch
-
-	// record
-	crAbort := ControlRecord{
-		Version: 0,
-		Type:    recordType,
-	}
-	crKey := &realEncoder{raw: make([]byte, 4)}
-	crValue := &realEncoder{raw: make([]byte, 6)}
-	crAbort.encode(crKey, crValue)
-	rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
-	batch.addRecord(rec)
-
-	frb.RecordsSet = append(frb.RecordsSet, &records)
-}
-
-func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
-	r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
-}
-
-func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
-	r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
-}
-
-func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
-	r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
-}
-
-func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
-	// the control record key and value are built in AddControlRecordWithTimestamp
-	r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
-}
-
-func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
-	frb := r.getOrCreateBlock(topic, partition)
-	if len(frb.RecordsSet) == 0 {
-		records := newDefaultRecords(&RecordBatch{Version: 2})
-		frb.RecordsSet = []*Records{&records}
-	}
-	batch := frb.RecordsSet[0].RecordBatch
-	batch.LastOffsetDelta = offset
-}
-
-func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
-	frb := r.getOrCreateBlock(topic, partition)
-	frb.LastStableOffset = offset
-}
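The doc comment on AddRecordBatchWithTimestamp above notes that transactions are committed or aborted per batch, which is why that helper emits one batch per record. As a hedged illustration only, the sketch below shows how a mock broker test might combine these helpers to fake a committed transaction; the topic name, offsets, and producer ID are invented, and the helpers shown are the same API surface kept by the IBM/sarama fork this change vendors in.

```go
package example

import sarama "github.com/IBM/sarama" // same helper signatures as the removed Shopify/sarama copy

// committedTxnFetchResponse fakes a fetch response containing a committed
// transaction: one record batch per record, then a control batch that commits
// producer 42's transaction so ReadCommitted consumers will release the records.
func committedTxnFetchResponse() *sarama.FetchResponse {
	res := &sarama.FetchResponse{Version: 4} // v4+ carries transactional metadata
	res.AddRecordBatch("example-topic", 0, nil, sarama.StringEncoder("v1"), 0, 42, true)
	res.AddRecordBatch("example-topic", 0, nil, sarama.StringEncoder("v2"), 1, 42, true)
	res.AddControlRecord("example-topic", 0, 2, 42, sarama.ControlRecordCommit)
	res.SetLastOffsetDelta("example-topic", 0, 2)
	res.SetLastStableOffset("example-topic", 0, 2)
	return res
}
```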
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
deleted file mode 100644
index 597bcbf..0000000
--- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package sarama
-
-type CoordinatorType int8
-
-const (
-	CoordinatorGroup CoordinatorType = iota
-	CoordinatorTransaction
-)
-
-type FindCoordinatorRequest struct {
-	Version         int16
-	CoordinatorKey  string
-	CoordinatorType CoordinatorType
-}
-
-func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(f.CoordinatorKey); err != nil {
-		return err
-	}
-
-	if f.Version >= 1 {
-		pe.putInt8(int8(f.CoordinatorType))
-	}
-
-	return nil
-}
-
-func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
-	if f.CoordinatorKey, err = pd.getString(); err != nil {
-		return err
-	}
-
-	if version >= 1 {
-		f.Version = version
-		coordinatorType, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-
-		f.CoordinatorType = CoordinatorType(coordinatorType)
-	}
-
-	return nil
-}
-
-func (f *FindCoordinatorRequest) key() int16 {
-	return 10
-}
-
-func (f *FindCoordinatorRequest) version() int16 {
-	return f.Version
-}
-
-func (r *FindCoordinatorRequest) headerVersion() int16 {
-	return 1
-}
-
-func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
-	switch f.Version {
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_8_2_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
deleted file mode 100644
index 83a648a..0000000
--- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-var NoNode = &Broker{id: -1, addr: ":-1"}
-
-type FindCoordinatorResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	Err          KError
-	ErrMsg       *string
-	Coordinator  *Broker
-}
-
-func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
-	if version >= 1 {
-		f.Version = version
-
-		throttleTime, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-		f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-	}
-
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	f.Err = KError(tmp)
-
-	if version >= 1 {
-		if f.ErrMsg, err = pd.getNullableString(); err != nil {
-			return err
-		}
-	}
-
-	coordinator := new(Broker)
-	// The version is hardcoded to 0, as version 1 of the Broker decode
-	// includes the rack field, which is not present in the FindCoordinatorResponse.
-	if err := coordinator.decode(pd, 0); err != nil {
-		return err
-	}
-	if coordinator.addr == ":0" {
-		return nil
-	}
-	f.Coordinator = coordinator
-
-	return nil
-}
-
-func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
-	if f.Version >= 1 {
-		pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
-	}
-
-	pe.putInt16(int16(f.Err))
-
-	if f.Version >= 1 {
-		if err := pe.putNullableString(f.ErrMsg); err != nil {
-			return err
-		}
-	}
-
-	coordinator := f.Coordinator
-	if coordinator == nil {
-		coordinator = NoNode
-	}
-	if err := coordinator.encode(pe, 0); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (f *FindCoordinatorResponse) key() int16 {
-	return 10
-}
-
-func (f *FindCoordinatorResponse) version() int16 {
-	return f.Version
-}
-
-func (r *FindCoordinatorResponse) headerVersion() int16 {
-	return 0
-}
-
-func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
-	switch f.Version {
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_8_2_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
deleted file mode 100644
index ab8b701..0000000
--- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"strings"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"github.com/jcmturner/gokrb5/v8/asn1tools"
-	"github.com/jcmturner/gokrb5/v8/gssapi"
-	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
-	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
-	"github.com/jcmturner/gokrb5/v8/messages"
-	"github.com/jcmturner/gokrb5/v8/types"
-)
-
-const (
-	TOK_ID_KRB_AP_REQ   = 256
-	GSS_API_GENERIC_TAG = 0x60
-	KRB5_USER_AUTH      = 1
-	KRB5_KEYTAB_AUTH    = 2
-	GSS_API_INITIAL     = 1
-	GSS_API_VERIFY      = 2
-	GSS_API_FINISH      = 3
-)
-
-type GSSAPIConfig struct {
-	AuthType           int
-	KeyTabPath         string
-	KerberosConfigPath string
-	ServiceName        string
-	Username           string
-	Password           string
-	Realm              string
-	DisablePAFXFAST    bool
-}
-
-type GSSAPIKerberosAuth struct {
-	Config                *GSSAPIConfig
-	ticket                messages.Ticket
-	encKey                types.EncryptionKey
-	NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
-	step                  int
-}
-
-type KerberosClient interface {
-	Login() error
-	GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
-	Domain() string
-	CName() types.PrincipalName
-	Destroy()
-}
-
-// writePackage prepends the payload length in big endian and sends the framed package to Kafka
-func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
-	length := uint64(len(payload))
-	size := length + 4 // 4 byte length header + payload
-	if size > math.MaxInt32 {
-		return 0, errors.New("payload too large, will overflow int32")
-	}
-	finalPackage := make([]byte, size)
-	copy(finalPackage[4:], payload)
-	binary.BigEndian.PutUint32(finalPackage, uint32(length))
-	bytes, err := broker.conn.Write(finalPackage)
-	if err != nil {
-		return bytes, err
-	}
-	return bytes, nil
-}
-
-// readPackage reads payload length (4 bytes) and then reads the payload into []byte
-func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
-	bytesRead := 0
-	lengthInBytes := make([]byte, 4)
-	bytes, err := io.ReadFull(broker.conn, lengthInBytes)
-	if err != nil {
-		return nil, bytesRead, err
-	}
-	bytesRead += bytes
-	payloadLength := binary.BigEndian.Uint32(lengthInBytes)
-	payloadBytes := make([]byte, payloadLength)         // buffer for read..
-	bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes
-	if err != nil {
-		return payloadBytes, bytesRead, err
-	}
-	bytesRead += bytes
-	return payloadBytes, bytesRead, nil
-}
-
-func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
-	a := make([]byte, 24)
-	flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
-	binary.LittleEndian.PutUint32(a[:4], 16)
-	for _, i := range flags {
-		f := binary.LittleEndian.Uint32(a[20:24])
-		f |= uint32(i)
-		binary.LittleEndian.PutUint32(a[20:24], f)
-	}
-	return a
-}
-
-/*
-*
-* Construct Kerberos AP_REQ package, conforming to RFC-4120
-* https://tools.ietf.org/html/rfc4120#page-84
-*
- */
-func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
-	domain string, cname types.PrincipalName,
-	ticket messages.Ticket,
-	sessionKey types.EncryptionKey) ([]byte, error) {
-	auth, err := types.NewAuthenticator(domain, cname)
-	if err != nil {
-		return nil, err
-	}
-	auth.Cksum = types.Checksum{
-		CksumType: chksumtype.GSSAPI,
-		Checksum:  krbAuth.newAuthenticatorChecksum(),
-	}
-	APReq, err := messages.NewAPReq(
-		ticket,
-		sessionKey,
-		auth,
-	)
-	if err != nil {
-		return nil, err
-	}
-	aprBytes := make([]byte, 2)
-	binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
-	tb, err := APReq.Marshal()
-	if err != nil {
-		return nil, err
-	}
-	aprBytes = append(aprBytes, tb...)
-	return aprBytes, nil
-}
-
-/*
-*
-*	Append the GSS-API header to the payload, conforming to RFC-2743
-*	Section 3.1, Mechanism-Independent Token Format
-*
-*	https://tools.ietf.org/html/rfc2743#page-81
-*
-*	GSSAPIHeader + <specific mechanism payload>
-*
- */
-func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
-	oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID())
-	if err != nil {
-		return nil, err
-	}
-	tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
-	GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
-	GSSHeader = append(GSSHeader, oidBytes...)
-	GSSPackage := append(GSSHeader, payload...)
-	return GSSPackage, nil
-}
-
-func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
-	switch krbAuth.step {
-	case GSS_API_INITIAL:
-		aprBytes, err := krbAuth.createKrb5Token(
-			kerberosClient.Domain(),
-			kerberosClient.CName(),
-			krbAuth.ticket,
-			krbAuth.encKey)
-		if err != nil {
-			return nil, err
-		}
-		krbAuth.step = GSS_API_VERIFY
-		return krbAuth.appendGSSAPIHeader(aprBytes)
-	case GSS_API_VERIFY:
-		wrapTokenReq := gssapi.WrapToken{}
-		if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
-			return nil, err
-		}
-		// Validate response.
-		isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
-		if !isValid {
-			return nil, err
-		}
-
-		wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
-		if err != nil {
-			return nil, err
-		}
-		krbAuth.step = GSS_API_FINISH
-		return wrapTokenResponse.Marshal()
-	}
-	return nil, nil
-}
-
-/* This does the handshake for authorization */
-func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
-	kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
-	if err != nil {
-		Logger.Printf("Kerberos client error: %s", err)
-		return err
-	}
-
-	err = kerberosClient.Login()
-	if err != nil {
-		Logger.Printf("Kerberos client error: %s", err)
-		return err
-	}
-	// Construct SPN using serviceName and host
-	// SPN format: <SERVICE>/<FQDN>
-
-	host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
-	spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
-
-	ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
-	if err != nil {
-		Logger.Printf("Error getting Kerberos service ticket : %s", err)
-		return err
-	}
-	krbAuth.ticket = ticket
-	krbAuth.encKey = encKey
-	krbAuth.step = GSS_API_INITIAL
-	var receivedBytes []byte = nil
-	defer kerberosClient.Destroy()
-	for {
-		packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
-		if err != nil {
-			Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
-			return err
-		}
-		requestTime := time.Now()
-		bytesWritten, err := krbAuth.writePackage(broker, packBytes)
-		if err != nil {
-			Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
-			return err
-		}
-		broker.updateOutgoingCommunicationMetrics(bytesWritten)
-		if krbAuth.step == GSS_API_VERIFY {
-			bytesRead := 0
-			receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
-			requestLatency := time.Since(requestTime)
-			broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
-			if err != nil {
-				Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
-				return err
-			}
-		} else if krbAuth.step == GSS_API_FINISH {
-			return nil
-		}
-	}
-}
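writePackage and readPackage above implement the framing used during the SASL/GSSAPI handshake: a 4-byte big-endian length header followed by the opaque token. Below is a standalone sketch of that framing over any io.Reader/io.Writer, using only the standard library; the function names are illustrative and not part of sarama.

```go
package example

import (
	"encoding/binary"
	"io"
)

// writeFrame prepends a 4-byte big-endian length header to payload and writes the frame.
func writeFrame(w io.Writer, payload []byte) error {
	frame := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(frame, uint32(len(payload)))
	copy(frame[4:], payload)
	_, err := w.Write(frame)
	return err
}

// readFrame reads the 4-byte length header, then the payload it announces.
func readFrame(r io.Reader) ([]byte, error) {
	header := make([]byte, 4)
	if _, err := io.ReadFull(r, header); err != nil {
		return nil, err
	}
	payload := make([]byte, binary.BigEndian.Uint32(header))
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}
```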
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
deleted file mode 100644
index e9d9af1..0000000
--- a/vendor/github.com/Shopify/sarama/heartbeat_request.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sarama
-
-type HeartbeatRequest struct {
-	GroupId      string
-	GenerationId int32
-	MemberId     string
-}
-
-func (r *HeartbeatRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.GroupId); err != nil {
-		return err
-	}
-
-	pe.putInt32(r.GenerationId)
-
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
-	if r.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-	if r.GenerationId, err = pd.getInt32(); err != nil {
-		return
-	}
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	return nil
-}
-
-func (r *HeartbeatRequest) key() int16 {
-	return 12
-}
-
-func (r *HeartbeatRequest) version() int16 {
-	return 0
-}
-
-func (r *HeartbeatRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
deleted file mode 100644
index 577ab72..0000000
--- a/vendor/github.com/Shopify/sarama/heartbeat_response.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package sarama
-
-type HeartbeatResponse struct {
-	Err KError
-}
-
-func (r *HeartbeatResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	return nil
-}
-
-func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(kerr)
-
-	return nil
-}
-
-func (r *HeartbeatResponse) key() int16 {
-	return 12
-}
-
-func (r *HeartbeatResponse) version() int16 {
-	return 0
-}
-
-func (r *HeartbeatResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
deleted file mode 100644
index c4d05a9..0000000
--- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package sarama
-
-type IncrementalAlterConfigsOperation int8
-
-const (
-	IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota
-	IncrementalAlterConfigsOperationDelete
-	IncrementalAlterConfigsOperationAppend
-	IncrementalAlterConfigsOperationSubtract
-)
-
-// IncrementalAlterConfigsRequest is an incremental alter config request type
-type IncrementalAlterConfigsRequest struct {
-	Resources    []*IncrementalAlterConfigsResource
-	ValidateOnly bool
-}
-
-type IncrementalAlterConfigsResource struct {
-	Type          ConfigResourceType
-	Name          string
-	ConfigEntries map[string]IncrementalAlterConfigsEntry
-}
-
-type IncrementalAlterConfigsEntry struct {
-	Operation IncrementalAlterConfigsOperation
-	Value     *string
-}
-
-func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(a.Resources)); err != nil {
-		return err
-	}
-
-	for _, r := range a.Resources {
-		if err := r.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	pe.putBool(a.ValidateOnly)
-	return nil
-}
-
-func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error {
-	resourceCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount)
-	for i := range a.Resources {
-		r := &IncrementalAlterConfigsResource{}
-		err = r.decode(pd, version)
-		if err != nil {
-			return err
-		}
-		a.Resources[i] = r
-	}
-
-	validateOnly, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-
-	a.ValidateOnly = validateOnly
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error {
-	pe.putInt8(int8(a.Type))
-
-	if err := pe.putString(a.Name); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
-		return err
-	}
-
-	for name, e := range a.ConfigEntries {
-		if err := pe.putString(name); err != nil {
-			return err
-		}
-
-		if err := e.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error {
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	a.Type = ConfigResourceType(t)
-
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	a.Name = name
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n)
-		for i := 0; i < n; i++ {
-			name, err := pd.getString()
-			if err != nil {
-				return err
-			}
-
-			var v IncrementalAlterConfigsEntry
-
-			if err := v.decode(pd, version); err != nil {
-				return err
-			}
-
-			a.ConfigEntries[name] = v
-		}
-	}
-	return err
-}
-
-func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error {
-	pe.putInt8(int8(a.Operation))
-
-	if err := pe.putNullableString(a.Value); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error {
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	a.Operation = IncrementalAlterConfigsOperation(t)
-
-	s, err := pd.getNullableString()
-	if err != nil {
-		return err
-	}
-
-	a.Value = s
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsRequest) key() int16 {
-	return 44
-}
-
-func (a *IncrementalAlterConfigsRequest) version() int16 {
-	return 0
-}
-
-func (a *IncrementalAlterConfigsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion {
-	return V2_3_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
deleted file mode 100644
index 3e8c450..0000000
--- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package sarama
-
-import "time"
-
-// IncrementalAlterConfigsResponse is a response type for incremental alter config
-type IncrementalAlterConfigsResponse struct {
-	ThrottleTime time.Duration
-	Resources    []*AlterConfigsResourceResponse
-}
-
-func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(a.Resources)); err != nil {
-		return err
-	}
-
-	for _, v := range a.Resources {
-		if err := v.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	responseCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
-
-	for i := range a.Resources {
-		a.Resources[i] = new(AlterConfigsResourceResponse)
-
-		if err := a.Resources[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsResponse) key() int16 {
-	return 44
-}
-
-func (a *IncrementalAlterConfigsResponse) version() int16 {
-	return 0
-}
-
-func (a *IncrementalAlterConfigsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion {
-	return V2_3_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
deleted file mode 100644
index 6894443..0000000
--- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package sarama
-
-import "time"
-
-type InitProducerIDRequest struct {
-	TransactionalID    *string
-	TransactionTimeout time.Duration
-}
-
-func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
-	if err := pe.putNullableString(i.TransactionalID); err != nil {
-		return err
-	}
-	pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
-
-	return nil
-}
-
-func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
-	if i.TransactionalID, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
-
-	return nil
-}
-
-func (i *InitProducerIDRequest) key() int16 {
-	return 22
-}
-
-func (i *InitProducerIDRequest) version() int16 {
-	return 0
-}
-
-func (i *InitProducerIDRequest) headerVersion() int16 {
-	return 1
-}
-
-func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
deleted file mode 100644
index 3e1242b..0000000
--- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package sarama
-
-import "time"
-
-type InitProducerIDResponse struct {
-	ThrottleTime  time.Duration
-	Err           KError
-	ProducerID    int64
-	ProducerEpoch int16
-}
-
-func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
-	pe.putInt16(int16(i.Err))
-	pe.putInt64(i.ProducerID)
-	pe.putInt16(i.ProducerEpoch)
-
-	return nil
-}
-
-func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	i.Err = KError(kerr)
-
-	if i.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	if i.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (i *InitProducerIDResponse) key() int16 {
-	return 22
-}
-
-func (i *InitProducerIDResponse) version() int16 {
-	return 0
-}
-
-func (i *InitProducerIDResponse) headerVersion() int16 {
-	return 0
-}
-
-func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/Shopify/sarama/interceptors.go
deleted file mode 100644
index d0d33e5..0000000
--- a/vendor/github.com/Shopify/sarama/interceptors.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package sarama
-
-// ProducerInterceptor allows you to intercept (and possibly mutate) the records
-// received by the producer before they are published to the Kafka cluster.
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
-type ProducerInterceptor interface {
-
-	// OnSend is called when the producer message is intercepted. Please avoid
-	// modifying the message until it's safe to do so, as this is _not_ a copy
-	// of the message.
-	OnSend(*ProducerMessage)
-}
-
-// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
-// received by the consumer before they are sent to the messages channel.
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
-type ConsumerInterceptor interface {
-
-	// OnConsume is called when the consumed message is intercepted. Please
-	// avoid modifying the message until it's safe to do so, as this is _not_ a
-	// copy of the message.
-	OnConsume(*ConsumerMessage)
-}
-
-func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
-	defer func() {
-		if r := recover(); r != nil {
-			Logger.Printf("Error when calling producer interceptor: %v, %v\n", interceptor, r)
-		}
-	}()
-
-	interceptor.OnSend(msg)
-}
-
-func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
-	defer func() {
-		if r := recover(); r != nil {
-			Logger.Printf("Error when calling consumer interceptor: %v, %v\n", interceptor, r)
-		}
-	}()
-
-	interceptor.OnConsume(msg)
-}
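The interceptor interfaces above receive the live message (not a copy) on the producer/consumer hot path, and safelyApplyInterceptor recovers any panic they raise. A hedged sketch of a trivial ProducerInterceptor that stamps an extra header on every outgoing message follows; the header key and the config wiring are examples, not anything mandated by the library.

```go
package example

import sarama "github.com/IBM/sarama" // interface shape matches the removed Shopify/sarama copy

// headerInterceptor adds a static origin header to every intercepted message.
type headerInterceptor struct {
	origin string
}

// OnSend mutates the message in place before it is published; keep it cheap,
// since it runs for every record the producer sends.
func (h *headerInterceptor) OnSend(msg *sarama.ProducerMessage) {
	msg.Headers = append(msg.Headers, sarama.RecordHeader{
		Key:   []byte("x-origin"), // example header name
		Value: []byte(h.origin),
	})
}

// newInterceptedConfig wires the interceptor into a producer configuration.
func newInterceptedConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.Interceptors = []sarama.ProducerInterceptor{&headerInterceptor{origin: "svc-a"}}
	return cfg
}
```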
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
deleted file mode 100644
index 3734e82..0000000
--- a/vendor/github.com/Shopify/sarama/join_group_request.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package sarama
-
-type GroupProtocol struct {
-	Name     string
-	Metadata []byte
-}
-
-func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
-	p.Name, err = pd.getString()
-	if err != nil {
-		return err
-	}
-	p.Metadata, err = pd.getBytes()
-	return err
-}
-
-func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
-	if err := pe.putString(p.Name); err != nil {
-		return err
-	}
-	if err := pe.putBytes(p.Metadata); err != nil {
-		return err
-	}
-	return nil
-}
-
-type JoinGroupRequest struct {
-	Version               int16
-	GroupId               string
-	SessionTimeout        int32
-	RebalanceTimeout      int32
-	MemberId              string
-	ProtocolType          string
-	GroupProtocols        map[string][]byte // deprecated; use OrderedGroupProtocols
-	OrderedGroupProtocols []*GroupProtocol
-}
-
-func (r *JoinGroupRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.GroupId); err != nil {
-		return err
-	}
-	pe.putInt32(r.SessionTimeout)
-	if r.Version >= 1 {
-		pe.putInt32(r.RebalanceTimeout)
-	}
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-	if err := pe.putString(r.ProtocolType); err != nil {
-		return err
-	}
-
-	if len(r.GroupProtocols) > 0 {
-		if len(r.OrderedGroupProtocols) > 0 {
-			return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
-		}
-
-		if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
-			return err
-		}
-		for name, metadata := range r.GroupProtocols {
-			if err := pe.putString(name); err != nil {
-				return err
-			}
-			if err := pe.putBytes(metadata); err != nil {
-				return err
-			}
-		}
-	} else {
-		if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
-			return err
-		}
-		for _, protocol := range r.OrderedGroupProtocols {
-			if err := protocol.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-
-	if r.SessionTimeout, err = pd.getInt32(); err != nil {
-		return
-	}
-
-	if version >= 1 {
-		if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
-			return err
-		}
-	}
-
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	if r.ProtocolType, err = pd.getString(); err != nil {
-		return
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.GroupProtocols = make(map[string][]byte)
-	for i := 0; i < n; i++ {
-		protocol := &GroupProtocol{}
-		if err := protocol.decode(pd); err != nil {
-			return err
-		}
-		r.GroupProtocols[protocol.Name] = protocol.Metadata
-		r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
-	}
-
-	return nil
-}
-
-func (r *JoinGroupRequest) key() int16 {
-	return 11
-}
-
-func (r *JoinGroupRequest) version() int16 {
-	return r.Version
-}
-
-func (r *JoinGroupRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 2:
-		return V0_11_0_0
-	case 1:
-		return V0_10_1_0
-	default:
-		return V0_9_0_0
-	}
-}
-
-func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
-	r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
-		Name:     name,
-		Metadata: metadata,
-	})
-}
-
-func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
-	bin, err := encode(metadata, nil)
-	if err != nil {
-		return err
-	}
-
-	r.AddGroupProtocol(name, bin)
-	return nil
-}
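As the deprecated GroupProtocols field above indicates, new callers should register protocols through the ordered helpers; AddGroupProtocolMetadata marshals the ConsumerGroupMemberMetadata for you. A small, hedged sketch follows, in which the group, member, and topic names are invented.

```go
package example

import sarama "github.com/IBM/sarama" // request layout matches the removed Shopify/sarama copy

// newJoinGroupRequest builds a v1 JoinGroup request with an ordered "range"
// protocol entry instead of the deprecated GroupProtocols map.
func newJoinGroupRequest() (*sarama.JoinGroupRequest, error) {
	req := &sarama.JoinGroupRequest{
		Version:          1,
		GroupId:          "example-group",
		SessionTimeout:   30000, // milliseconds
		RebalanceTimeout: 60000, // milliseconds, only encoded for Version >= 1
		MemberId:         "",    // empty on first join; the broker assigns one
		ProtocolType:     "consumer",
	}
	err := req.AddGroupProtocolMetadata("range", &sarama.ConsumerGroupMemberMetadata{
		Version: 1,
		Topics:  []string{"example-topic"},
	})
	return req, err
}
```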
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
deleted file mode 100644
index 54b0a45..0000000
--- a/vendor/github.com/Shopify/sarama/join_group_response.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package sarama
-
-type JoinGroupResponse struct {
-	Version       int16
-	ThrottleTime  int32
-	Err           KError
-	GenerationId  int32
-	GroupProtocol string
-	LeaderId      string
-	MemberId      string
-	Members       map[string][]byte
-}
-
-func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
-	members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
-	for id, bin := range r.Members {
-		meta := new(ConsumerGroupMemberMetadata)
-		if err := decode(bin, meta); err != nil {
-			return nil, err
-		}
-		members[id] = *meta
-	}
-	return members, nil
-}
-
-func (r *JoinGroupResponse) encode(pe packetEncoder) error {
-	if r.Version >= 2 {
-		pe.putInt32(r.ThrottleTime)
-	}
-	pe.putInt16(int16(r.Err))
-	pe.putInt32(r.GenerationId)
-
-	if err := pe.putString(r.GroupProtocol); err != nil {
-		return err
-	}
-	if err := pe.putString(r.LeaderId); err != nil {
-		return err
-	}
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(r.Members)); err != nil {
-		return err
-	}
-
-	for memberId, memberMetadata := range r.Members {
-		if err := pe.putString(memberId); err != nil {
-			return err
-		}
-
-		if err := pe.putBytes(memberMetadata); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if version >= 2 {
-		if r.ThrottleTime, err = pd.getInt32(); err != nil {
-			return
-		}
-	}
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	if r.GenerationId, err = pd.getInt32(); err != nil {
-		return
-	}
-
-	if r.GroupProtocol, err = pd.getString(); err != nil {
-		return
-	}
-
-	if r.LeaderId, err = pd.getString(); err != nil {
-		return
-	}
-
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.Members = make(map[string][]byte)
-	for i := 0; i < n; i++ {
-		memberId, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		memberMetadata, err := pd.getBytes()
-		if err != nil {
-			return err
-		}
-
-		r.Members[memberId] = memberMetadata
-	}
-
-	return nil
-}
-
-func (r *JoinGroupResponse) key() int16 {
-	return 11
-}
-
-func (r *JoinGroupResponse) version() int16 {
-	return r.Version
-}
-
-func (r *JoinGroupResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 2:
-		return V0_11_0_0
-	case 1:
-		return V0_10_1_0
-	default:
-		return V0_9_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go
deleted file mode 100644
index 01a5319..0000000
--- a/vendor/github.com/Shopify/sarama/kerberos_client.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package sarama
-
-import (
-	krb5client "github.com/jcmturner/gokrb5/v8/client"
-	krb5config "github.com/jcmturner/gokrb5/v8/config"
-	"github.com/jcmturner/gokrb5/v8/keytab"
-	"github.com/jcmturner/gokrb5/v8/types"
-)
-
-type KerberosGoKrb5Client struct {
-	krb5client.Client
-}
-
-func (c *KerberosGoKrb5Client) Domain() string {
-	return c.Credentials.Domain()
-}
-
-func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
-	return c.Credentials.CName()
-}
-
-// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
-// It uses a pure Go Kerberos 5 implementation (RFC 4121 and RFC 4120),
-// built on the underlying gokrb5 library, a pure Go Kerberos client with some GSS-API capabilities.
-func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
-	cfg, err := krb5config.Load(config.KerberosConfigPath)
-	if err != nil {
-		return nil, err
-	}
-	return createClient(config, cfg)
-}
-
-func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
-	var client *krb5client.Client
-	if config.AuthType == KRB5_KEYTAB_AUTH {
-		kt, err := keytab.Load(config.KeyTabPath)
-		if err != nil {
-			return nil, err
-		}
-		client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
-	} else {
-		client = krb5client.NewWithPassword(config.Username,
-			config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
-	}
-	return &KerberosGoKrb5Client{*client}, nil
-}
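NewKerberosClient above is what the GSSAPI handshake calls (through NewKerberosClientFunc) once SASL/GSSAPI is enabled on the client. A minimal, hedged sketch of a keytab-based configuration follows; the paths, principal, realm, and service name are placeholders.

```go
package example

import sarama "github.com/IBM/sarama" // GSSAPIConfig fields match the removed Shopify/sarama copy

// newGSSAPIConfig sketches a keytab-based SASL/GSSAPI client configuration.
func newGSSAPIConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI = sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_KEYTAB_AUTH,
		KeyTabPath:         "/etc/security/keytabs/client.keytab", // placeholder
		KerberosConfigPath: "/etc/krb5.conf",                      // placeholder
		ServiceName:        "kafka",
		Username:           "client",
		Realm:              "EXAMPLE.COM",
	}
	return cfg
}
```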
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
deleted file mode 100644
index d7789b6..0000000
--- a/vendor/github.com/Shopify/sarama/leave_group_request.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package sarama
-
-type LeaveGroupRequest struct {
-	GroupId  string
-	MemberId string
-}
-
-func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.GroupId); err != nil {
-		return err
-	}
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
-	if r.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	return nil
-}
-
-func (r *LeaveGroupRequest) key() int16 {
-	return 13
-}
-
-func (r *LeaveGroupRequest) version() int16 {
-	return 0
-}
-
-func (r *LeaveGroupRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
deleted file mode 100644
index 25f8d5e..0000000
--- a/vendor/github.com/Shopify/sarama/leave_group_response.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package sarama
-
-type LeaveGroupResponse struct {
-	Err KError
-}
-
-func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	return nil
-}
-
-func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(kerr)
-
-	return nil
-}
-
-func (r *LeaveGroupResponse) key() int16 {
-	return 13
-}
-
-func (r *LeaveGroupResponse) version() int16 {
-	return 0
-}
-
-func (r *LeaveGroupResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
deleted file mode 100644
index 4553b2d..0000000
--- a/vendor/github.com/Shopify/sarama/list_groups_request.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package sarama
-
-type ListGroupsRequest struct{}
-
-func (r *ListGroupsRequest) encode(pe packetEncoder) error {
-	return nil
-}
-
-func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
-	return nil
-}
-
-func (r *ListGroupsRequest) key() int16 {
-	return 16
-}
-
-func (r *ListGroupsRequest) version() int16 {
-	return 0
-}
-
-func (r *ListGroupsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
deleted file mode 100644
index 777bae7..0000000
--- a/vendor/github.com/Shopify/sarama/list_groups_response.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package sarama
-
-type ListGroupsResponse struct {
-	Err    KError
-	Groups map[string]string
-}
-
-func (r *ListGroupsResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-
-	if err := pe.putArrayLength(len(r.Groups)); err != nil {
-		return err
-	}
-	for groupId, protocolType := range r.Groups {
-		if err := pe.putString(groupId); err != nil {
-			return err
-		}
-		if err := pe.putString(protocolType); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.Groups = make(map[string]string)
-	for i := 0; i < n; i++ {
-		groupId, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		protocolType, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		r.Groups[groupId] = protocolType
-	}
-
-	return nil
-}
-
-func (r *ListGroupsResponse) key() int16 {
-	return 16
-}
-
-func (r *ListGroupsResponse) version() int16 {
-	return 0
-}
-
-func (r *ListGroupsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
deleted file mode 100644
index c1ffa9b..0000000
--- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package sarama
-
-type ListPartitionReassignmentsRequest struct {
-	TimeoutMs int32
-	blocks    map[string][]int32
-	Version   int16
-}
-
-func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
-	pe.putInt32(r.TimeoutMs)
-
-	pe.putCompactArrayLength(len(r.blocks))
-
-	for topic, partitions := range r.blocks {
-		if err := pe.putCompactString(topic); err != nil {
-			return err
-		}
-
-		if err := pe.putCompactInt32Array(partitions); err != nil {
-			return err
-		}
-
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.TimeoutMs, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	topicCount, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount > 0 {
-		r.blocks = make(map[string][]int32)
-		for i := 0; i < topicCount; i++ {
-			topic, err := pd.getCompactString()
-			if err != nil {
-				return err
-			}
-			partitionCount, err := pd.getCompactArrayLength()
-			if err != nil {
-				return err
-			}
-			r.blocks[topic] = make([]int32, partitionCount)
-			for j := 0; j < partitionCount; j++ {
-				partition, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-				r.blocks[topic][j] = partition
-			}
-			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return
-}
-
-func (r *ListPartitionReassignmentsRequest) key() int16 {
-	return 46
-}
-
-func (r *ListPartitionReassignmentsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
-	return 2
-}
-
-func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
-	return V2_4_0_0
-}
-
-func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string][]int32)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = partitionIDs
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
deleted file mode 100644
index 4baa6a0..0000000
--- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package sarama
-
-type PartitionReplicaReassignmentsStatus struct {
-	Replicas         []int32
-	AddingReplicas   []int32
-	RemovingReplicas []int32
-}
-
-func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
-	if err := pe.putCompactInt32Array(b.Replicas); err != nil {
-		return err
-	}
-	if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil {
-		return err
-	}
-	if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil {
-		return err
-	}
-
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
-	if b.Replicas, err = pd.getCompactInt32Array(); err != nil {
-		return err
-	}
-
-	if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil {
-		return err
-	}
-
-	if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil {
-		return err
-	}
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return err
-}
-
-type ListPartitionReassignmentsResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	ErrorCode      KError
-	ErrorMessage   *string
-	TopicStatus    map[string]map[int32]*PartitionReplicaReassignmentsStatus
-}
-
-func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
-	if r.TopicStatus == nil {
-		r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
-	}
-	partitions := r.TopicStatus[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
-		r.TopicStatus[topic] = partitions
-	}
-
-	partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
-}
-
-func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(r.ThrottleTimeMs)
-	pe.putInt16(int16(r.ErrorCode))
-	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
-		return err
-	}
-
-	pe.putCompactArrayLength(len(r.TopicStatus))
-	for topic, partitions := range r.TopicStatus {
-		if err := pe.putCompactString(topic); err != nil {
-			return err
-		}
-		pe.putCompactArrayLength(len(partitions))
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-
-			if err := block.encode(pe); err != nil {
-				return err
-			}
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.ErrorCode = KError(kerr)
-
-	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-		return err
-	}
-
-	numTopics, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
-	for i := 0; i < numTopics; i++ {
-		topic, err := pd.getCompactString()
-		if err != nil {
-			return err
-		}
-
-		ongoingPartitionReassignments, err := pd.getCompactArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
-
-		for j := 0; j < ongoingPartitionReassignments; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := &PartitionReplicaReassignmentsStatus{}
-			if err := block.decode(pd); err != nil {
-				return err
-			}
-			r.TopicStatus[topic][partition] = block
-		}
-
-		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *ListPartitionReassignmentsResponse) key() int16 {
-	return 46
-}
-
-func (r *ListPartitionReassignmentsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
-	return 1
-}
-
-func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
-	return V2_4_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
deleted file mode 100644
index fd0d1d9..0000000
--- a/vendor/github.com/Shopify/sarama/message.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-const (
-	// CompressionNone no compression
-	CompressionNone CompressionCodec = iota
-	// CompressionGZIP compression using GZIP
-	CompressionGZIP
-	// CompressionSnappy compression using snappy
-	CompressionSnappy
-	// CompressionLZ4 compression using LZ4
-	CompressionLZ4
-	// CompressionZSTD compression using ZSTD
-	CompressionZSTD
-
-	// The lowest 3 bits contain the compression codec used for the message
-	compressionCodecMask int8 = 0x07
-
-	// Bit 3 set for "LogAppend" timestamps
-	timestampTypeMask = 0x08
-
-	// CompressionLevelDefault is the constant to use in CompressionLevel
-	// to have the default compression level for any codec. The value is chosen
-	// so that it does not collide with any valid compression level.
-	CompressionLevelDefault = -1000
-)
-
-// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
-type CompressionCodec int8
-
-func (cc CompressionCodec) String() string {
-	return []string{
-		"none",
-		"gzip",
-		"snappy",
-		"lz4",
-		"zstd",
-	}[int(cc)]
-}
-
-// Message is a kafka message type
-type Message struct {
-	Codec            CompressionCodec // codec used to compress the message contents
-	CompressionLevel int              // compression level
-	LogAppendTime    bool             // the used timestamp is LogAppendTime
-	Key              []byte           // the message key, may be nil
-	Value            []byte           // the message contents
-	Set              *MessageSet      // the message set a message might wrap
-	Version          int8             // v1 requires Kafka 0.10
-	Timestamp        time.Time        // the timestamp of the message (version 1+ only)
-
-	compressedCache []byte
-	compressedSize  int // used for computing the compression ratio metrics
-}
-
-func (m *Message) encode(pe packetEncoder) error {
-	pe.push(newCRC32Field(crcIEEE))
-
-	pe.putInt8(m.Version)
-
-	attributes := int8(m.Codec) & compressionCodecMask
-	if m.LogAppendTime {
-		attributes |= timestampTypeMask
-	}
-	pe.putInt8(attributes)
-
-	if m.Version >= 1 {
-		if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
-			return err
-		}
-	}
-
-	err := pe.putBytes(m.Key)
-	if err != nil {
-		return err
-	}
-
-	var payload []byte
-
-	if m.compressedCache != nil {
-		payload = m.compressedCache
-		m.compressedCache = nil
-	} else if m.Value != nil {
-		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
-		if err != nil {
-			return err
-		}
-		m.compressedCache = payload
-		// Keep in mind the compressed payload size for metric gathering
-		m.compressedSize = len(payload)
-	}
-
-	if err = pe.putBytes(payload); err != nil {
-		return err
-	}
-
-	return pe.pop()
-}
-
-func (m *Message) decode(pd packetDecoder) (err error) {
-	crc32Decoder := acquireCrc32Field(crcIEEE)
-	defer releaseCrc32Field(crc32Decoder)
-
-	err = pd.push(crc32Decoder)
-	if err != nil {
-		return err
-	}
-
-	m.Version, err = pd.getInt8()
-	if err != nil {
-		return err
-	}
-
-	if m.Version > 1 {
-		return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
-	}
-
-	attribute, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	m.Codec = CompressionCodec(attribute & compressionCodecMask)
-	m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
-
-	if m.Version == 1 {
-		if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
-			return err
-		}
-	}
-
-	m.Key, err = pd.getBytes()
-	if err != nil {
-		return err
-	}
-
-	m.Value, err = pd.getBytes()
-	if err != nil {
-		return err
-	}
-
-	// Required for deep equal assertion during tests but might be useful
-	// for future metrics about the compression ratio in fetch requests
-	m.compressedSize = len(m.Value)
-
-	if m.Value != nil && m.Codec != CompressionNone {
-		m.Value, err = decompress(m.Codec, m.Value)
-		if err != nil {
-			return err
-		}
-
-		if err := m.decodeSet(); err != nil {
-			return err
-		}
-	}
-
-	return pd.pop()
-}
-
-// decodes a message set from a previously encoded bulk-message
-func (m *Message) decodeSet() (err error) {
-	pd := realDecoder{raw: m.Value}
-	m.Set = &MessageSet{}
-	return m.Set.decode(&pd)
-}
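
For context, the compression codec constants and CompressionLevelDefault removed here remain part of sarama's public API; a minimal sketch of how they are typically consumed through the producer configuration follows (assuming the standard Producer.Compression / Producer.CompressionLevel config fields and the replacement github.com/IBM/sarama import path).

package example

import "github.com/IBM/sarama"

// newCompressingConfig returns a producer config whose messages are compressed
// with GZIP; Message.encode stores the codec in the low three bits of the
// attributes byte (compressionCodecMask above).
func newCompressingConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.Compression = sarama.CompressionGZIP
	// CompressionLevelDefault defers to the codec's own default level.
	cfg.Producer.CompressionLevel = sarama.CompressionLevelDefault
	return cfg
}
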
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
deleted file mode 100644
index 6523ec2..0000000
--- a/vendor/github.com/Shopify/sarama/message_set.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package sarama
-
-type MessageBlock struct {
-	Offset int64
-	Msg    *Message
-}
-
-// Messages is a convenience helper which returns either the messages wrapped
-// in this block's nested message set, or a slice containing just this block.
-func (msb *MessageBlock) Messages() []*MessageBlock {
-	if msb.Msg.Set != nil {
-		return msb.Msg.Set.Messages
-	}
-	return []*MessageBlock{msb}
-}
-
-func (msb *MessageBlock) encode(pe packetEncoder) error {
-	pe.putInt64(msb.Offset)
-	pe.push(&lengthField{})
-	err := msb.Msg.encode(pe)
-	if err != nil {
-		return err
-	}
-	return pe.pop()
-}
-
-func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
-	if msb.Offset, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	lengthDecoder := acquireLengthField()
-	defer releaseLengthField(lengthDecoder)
-
-	if err = pd.push(lengthDecoder); err != nil {
-		return err
-	}
-
-	msb.Msg = new(Message)
-	if err = msb.Msg.decode(pd); err != nil {
-		return err
-	}
-
-	if err = pd.pop(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-type MessageSet struct {
-	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
-	OverflowMessage        bool // whether the set on the wire contained an overflow message
-	Messages               []*MessageBlock
-}
-
-func (ms *MessageSet) encode(pe packetEncoder) error {
-	for i := range ms.Messages {
-		err := ms.Messages[i].encode(pe)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (ms *MessageSet) decode(pd packetDecoder) (err error) {
-	ms.Messages = nil
-
-	for pd.remaining() > 0 {
-		magic, err := magicValue(pd)
-		if err != nil {
-			if err == ErrInsufficientData {
-				ms.PartialTrailingMessage = true
-				return nil
-			}
-			return err
-		}
-
-		if magic > 1 {
-			return nil
-		}
-
-		msb := new(MessageBlock)
-		err = msb.decode(pd)
-		switch err {
-		case nil:
-			ms.Messages = append(ms.Messages, msb)
-		case ErrInsufficientData:
-			// As an optimization the server is allowed to return a partial message at the
-			// end of the message set. Clients should handle this case. So we just ignore such things.
-			if msb.Offset == -1 {
-				// This is an overflow message caused by chunked down conversion
-				ms.OverflowMessage = true
-			} else {
-				ms.PartialTrailingMessage = true
-			}
-			return nil
-		default:
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (ms *MessageSet) addMessage(msg *Message) {
-	block := new(MessageBlock)
-	block.Msg = msg
-	ms.Messages = append(ms.Messages, block)
-}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
deleted file mode 100644
index e835f5a..0000000
--- a/vendor/github.com/Shopify/sarama/metadata_request.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package sarama
-
-type MetadataRequest struct {
-	Version                int16
-	Topics                 []string
-	AllowAutoTopicCreation bool
-}
-
-func (r *MetadataRequest) encode(pe packetEncoder) error {
-	if r.Version < 0 || r.Version > 5 {
-		return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
-	}
-	if r.Version == 0 || len(r.Topics) > 0 {
-		err := pe.putArrayLength(len(r.Topics))
-		if err != nil {
-			return err
-		}
-
-		for i := range r.Topics {
-			err = pe.putString(r.Topics[i])
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		pe.putInt32(-1)
-	}
-	if r.Version > 3 {
-		pe.putBool(r.AllowAutoTopicCreation)
-	}
-	return nil
-}
-
-func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
-	r.Version = version
-	size, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if size > 0 {
-		r.Topics = make([]string, size)
-		for i := range r.Topics {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			r.Topics[i] = topic
-		}
-	}
-	if r.Version > 3 {
-		autoCreation, err := pd.getBool()
-		if err != nil {
-			return err
-		}
-		r.AllowAutoTopicCreation = autoCreation
-	}
-	return nil
-}
-
-func (r *MetadataRequest) key() int16 {
-	return 3
-}
-
-func (r *MetadataRequest) version() int16 {
-	return r.Version
-}
-
-func (r *MetadataRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *MetadataRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_0_0
-	case 2:
-		return V0_10_1_0
-	case 3, 4:
-		return V0_11_0_0
-	case 5:
-		return V1_0_0_0
-	default:
-		return MinVersion
-	}
-}
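
For reference, the requiredVersion mapping above is how request versions are tied to the client's Config.Version; a minimal sketch, assuming the usual Config.Version gating and using the hypothetical helper name below:

package example

import "github.com/IBM/sarama"

// newV1Config opts the client into Kafka 1.0 protocol features, which (per the
// requiredVersion switch above) is the level needed for MetadataRequest v5.
func newV1Config() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0
	return cfg
}
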
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
deleted file mode 100644
index 0bb8702..0000000
--- a/vendor/github.com/Shopify/sarama/metadata_response.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package sarama
-
-type PartitionMetadata struct {
-	Err             KError
-	ID              int32
-	Leader          int32
-	Replicas        []int32
-	Isr             []int32
-	OfflineReplicas []int32
-}
-
-func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	pm.Err = KError(tmp)
-
-	pm.ID, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	pm.Leader, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	pm.Replicas, err = pd.getInt32Array()
-	if err != nil {
-		return err
-	}
-
-	pm.Isr, err = pd.getInt32Array()
-	if err != nil {
-		return err
-	}
-
-	if version >= 5 {
-		pm.OfflineReplicas, err = pd.getInt32Array()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(pm.Err))
-	pe.putInt32(pm.ID)
-	pe.putInt32(pm.Leader)
-
-	err = pe.putInt32Array(pm.Replicas)
-	if err != nil {
-		return err
-	}
-
-	err = pe.putInt32Array(pm.Isr)
-	if err != nil {
-		return err
-	}
-
-	if version >= 5 {
-		err = pe.putInt32Array(pm.OfflineReplicas)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-type TopicMetadata struct {
-	Err        KError
-	Name       string
-	IsInternal bool // Only valid for Version >= 1
-	Partitions []*PartitionMetadata
-}
-
-func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	tm.Err = KError(tmp)
-
-	tm.Name, err = pd.getString()
-	if err != nil {
-		return err
-	}
-
-	if version >= 1 {
-		tm.IsInternal, err = pd.getBool()
-		if err != nil {
-			return err
-		}
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	tm.Partitions = make([]*PartitionMetadata, n)
-	for i := 0; i < n; i++ {
-		tm.Partitions[i] = new(PartitionMetadata)
-		err = tm.Partitions[i].decode(pd, version)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(tm.Err))
-
-	err = pe.putString(tm.Name)
-	if err != nil {
-		return err
-	}
-
-	if version >= 1 {
-		pe.putBool(tm.IsInternal)
-	}
-
-	err = pe.putArrayLength(len(tm.Partitions))
-	if err != nil {
-		return err
-	}
-
-	for _, pm := range tm.Partitions {
-		err = pm.encode(pe, version)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-type MetadataResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	Brokers        []*Broker
-	ClusterID      *string
-	ControllerID   int32
-	Topics         []*TopicMetadata
-}
-
-func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if version >= 3 {
-		r.ThrottleTimeMs, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Brokers = make([]*Broker, n)
-	for i := 0; i < n; i++ {
-		r.Brokers[i] = new(Broker)
-		err = r.Brokers[i].decode(pd, version)
-		if err != nil {
-			return err
-		}
-	}
-
-	if version >= 2 {
-		r.ClusterID, err = pd.getNullableString()
-		if err != nil {
-			return err
-		}
-	}
-
-	if version >= 1 {
-		r.ControllerID, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	} else {
-		r.ControllerID = -1
-	}
-
-	n, err = pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Topics = make([]*TopicMetadata, n)
-	for i := 0; i < n; i++ {
-		r.Topics[i] = new(TopicMetadata)
-		err = r.Topics[i].decode(pd, version)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *MetadataResponse) encode(pe packetEncoder) error {
-	if r.Version >= 3 {
-		pe.putInt32(r.ThrottleTimeMs)
-	}
-
-	err := pe.putArrayLength(len(r.Brokers))
-	if err != nil {
-		return err
-	}
-	for _, broker := range r.Brokers {
-		err = broker.encode(pe, r.Version)
-		if err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 2 {
-		err := pe.putNullableString(r.ClusterID)
-		if err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 1 {
-		pe.putInt32(r.ControllerID)
-	}
-
-	err = pe.putArrayLength(len(r.Topics))
-	if err != nil {
-		return err
-	}
-	for _, tm := range r.Topics {
-		err = tm.encode(pe, r.Version)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *MetadataResponse) key() int16 {
-	return 3
-}
-
-func (r *MetadataResponse) version() int16 {
-	return r.Version
-}
-
-func (r *MetadataResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *MetadataResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_0_0
-	case 2:
-		return V0_10_1_0
-	case 3, 4:
-		return V0_11_0_0
-	case 5:
-		return V1_0_0_0
-	default:
-		return MinVersion
-	}
-}
-
-// testing API
-
-func (r *MetadataResponse) AddBroker(addr string, id int32) {
-	r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
-}
-
-func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
-	var tmatch *TopicMetadata
-
-	for _, tm := range r.Topics {
-		if tm.Name == topic {
-			tmatch = tm
-			goto foundTopic
-		}
-	}
-
-	tmatch = new(TopicMetadata)
-	tmatch.Name = topic
-	r.Topics = append(r.Topics, tmatch)
-
-foundTopic:
-
-	tmatch.Err = err
-	return tmatch
-}
-
-func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
-	tmatch := r.AddTopic(topic, ErrNoError)
-	var pmatch *PartitionMetadata
-
-	for _, pm := range tmatch.Partitions {
-		if pm.ID == partition {
-			pmatch = pm
-			goto foundPartition
-		}
-	}
-
-	pmatch = new(PartitionMetadata)
-	pmatch.ID = partition
-	tmatch.Partitions = append(tmatch.Partitions, pmatch)
-
-foundPartition:
-
-	pmatch.Leader = brokerID
-	pmatch.Replicas = replicas
-	pmatch.Isr = isr
-	pmatch.OfflineReplicas = offline
-	pmatch.Err = err
-}
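
The "testing API" helpers above (AddBroker, AddTopic, AddTopicPartition) are intended for use with the mock broker defined later in this diff; an illustrative sketch, with a hypothetical topic name and a single-broker layout:

package example

import "github.com/IBM/sarama"

// mockMetadata hand-builds a MetadataResponse the way tests usually do before
// handing it to MockBroker.Returns or a handler map.
func mockMetadata(mb *sarama.MockBroker) *sarama.MetadataResponse {
	md := new(sarama.MetadataResponse)
	md.AddBroker(mb.Addr(), mb.BrokerID())
	// Leader, replicas and ISR all point at the single mock broker; no offline replicas.
	md.AddTopicPartition("my-topic", 0, mb.BrokerID(),
		[]int32{mb.BrokerID()}, []int32{mb.BrokerID()}, nil, sarama.ErrNoError)
	return md
}
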
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
deleted file mode 100644
index 90e5a87..0000000
--- a/vendor/github.com/Shopify/sarama/metrics.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
-// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
-// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
-// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
-const (
-	metricsReservoirSize = 1028
-	metricsAlphaFactor   = 0.015
-)
-
-func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
-	return r.GetOrRegister(name, func() metrics.Histogram {
-		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
-	}).(metrics.Histogram)
-}
-
-func getMetricNameForBroker(name string, broker *Broker) string {
-	// Use broker id like the Java client as it does not contain '.' or ':' characters that
-	// can be interpreted as special character by monitoring tool (e.g. Graphite)
-	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
-}
-
-func getMetricNameForTopic(name string, topic string) string {
-	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
-	// cf. KAFKA-1902 and KAFKA-2337
-	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
-}
-
-func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
-	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
-}
-
-func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
-	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
-}
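
The reservoir comment above corresponds to the standard rcrowley/go-metrics registration pattern; a standalone sketch of the same idea (the metric name here is illustrative):

package example

import "github.com/rcrowley/go-metrics"

// getOrRegisterLatencyHistogram mirrors getOrRegisterHistogram above: an
// exponentially decaying sample of 1028 elements with alpha 0.015 backing a
// histogram, registered idempotently under the given name.
func getOrRegisterLatencyHistogram(r metrics.Registry) metrics.Histogram {
	return r.GetOrRegister("request-latency-in-ms", func() metrics.Histogram {
		return metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
	}).(metrics.Histogram)
}
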
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
deleted file mode 100644
index c2654d1..0000000
--- a/vendor/github.com/Shopify/sarama/mockbroker.go
+++ /dev/null
@@ -1,422 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"net"
-	"reflect"
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/davecgh/go-spew/spew"
-)
-
-const (
-	expectationTimeout = 500 * time.Millisecond
-)
-
-type GSSApiHandlerFunc func([]byte) []byte
-
-type requestHandlerFunc func(req *request) (res encoderWithHeader)
-
-// RequestNotifierFunc is invoked when a mock broker processes a request successfully
-// and provides the number of bytes read and written.
-type RequestNotifierFunc func(bytesRead, bytesWritten int)
-
-// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
-// to facilitate testing of higher level or specialized consumers and producers
-// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
-// but rather provides a facility to do that. It takes care of the TCP
-// transport, request unmarshalling, response marshalling, and makes it the test
-// writer responsibility to program correct according to the Kafka API protocol
-// MockBroker behaviour.
-//
-// MockBroker is implemented as a TCP server listening on a kernel-selected
-// localhost port that can accept many connections. It reads Kafka requests
-// from that connection and returns responses programmed by the SetHandlerByMap
-// function. If a MockBroker receives a request that it has no programmed
-// response for, then it returns nothing and the request times out.
-//
-// A set of MockRequest builders to define mappings used by MockBroker is
-// provided by Sarama. But users can develop MockRequests of their own and use
-// them along with or instead of the standard ones.
-//
-// When running tests with MockBroker it is strongly recommended to specify
-// a timeout to `go test` so that if the broker hangs waiting for a response,
-// the test panics.
-//
-// It is not necessary to prefix message length or correlation ID to your
-// response bytes; the server does that automatically as a convenience.
-type MockBroker struct {
-	brokerID      int32
-	port          int32
-	closing       chan none
-	stopper       chan none
-	expectations  chan encoderWithHeader
-	listener      net.Listener
-	t             TestReporter
-	latency       time.Duration
-	handler       requestHandlerFunc
-	notifier      RequestNotifierFunc
-	history       []RequestResponse
-	lock          sync.Mutex
-	gssApiHandler GSSApiHandlerFunc
-}
-
-// RequestResponse represents a Request/Response pair processed by MockBroker.
-type RequestResponse struct {
-	Request  protocolBody
-	Response encoder
-}
-
-// SetLatency makes the broker pause for the specified period every time before
-// replying.
-func (b *MockBroker) SetLatency(latency time.Duration) {
-	b.latency = latency
-}
-
-// SetHandlerByMap defines mapping of Request types to MockResponses. When a
-// request is received by the broker, it looks up the request type in the map
-// and uses the found MockResponse instance to generate an appropriate reply.
-// If the request type is not found in the map then nothing is sent.
-func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
-	b.setHandler(func(req *request) (res encoderWithHeader) {
-		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
-		mockResponse := handlerMap[reqTypeName]
-		if mockResponse == nil {
-			return nil
-		}
-		return mockResponse.For(req.body)
-	})
-}
-
-// SetNotifier sets a function that will get invoked whenever a request has been
-// processed successfully and will provide the number of bytes read and written.
-func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
-	b.lock.Lock()
-	b.notifier = notifier
-	b.lock.Unlock()
-}
-
-// BrokerID returns broker ID assigned to the broker.
-func (b *MockBroker) BrokerID() int32 {
-	return b.brokerID
-}
-
-// History returns a slice of RequestResponse pairs in the order they were
-// processed by the broker. Note that in case of multiple connections to the
-// broker the order expected by a test can be different from the order recorded
-// in the history, unless some synchronization is implemented in the test.
-func (b *MockBroker) History() []RequestResponse {
-	b.lock.Lock()
-	history := make([]RequestResponse, len(b.history))
-	copy(history, b.history)
-	b.lock.Unlock()
-	return history
-}
-
-// Port returns the TCP port number the broker is listening for requests on.
-func (b *MockBroker) Port() int32 {
-	return b.port
-}
-
-// Addr returns the broker connection string in the form "<address>:<port>".
-func (b *MockBroker) Addr() string {
-	return b.listener.Addr().String()
-}
-
-// Close terminates the broker blocking until it stops internal goroutines and
-// releases all resources.
-func (b *MockBroker) Close() {
-	close(b.expectations)
-	if len(b.expectations) > 0 {
-		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
-		for e := range b.expectations {
-			_, _ = buf.WriteString(spew.Sdump(e))
-		}
-		b.t.Error(buf.String())
-	}
-	close(b.closing)
-	<-b.stopper
-}
-
-// setHandler sets the specified function as the request handler. Whenever
-// a mock broker reads a request from the wire it passes the request to the
-// function and sends back whatever the handler function returns.
-func (b *MockBroker) setHandler(handler requestHandlerFunc) {
-	b.lock.Lock()
-	b.handler = handler
-	b.lock.Unlock()
-}
-
-func (b *MockBroker) serverLoop() {
-	defer close(b.stopper)
-	var err error
-	var conn net.Conn
-
-	go func() {
-		<-b.closing
-		err := b.listener.Close()
-		if err != nil {
-			b.t.Error(err)
-		}
-	}()
-
-	wg := &sync.WaitGroup{}
-	i := 0
-	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
-		wg.Add(1)
-		go b.handleRequests(conn, i, wg)
-		i++
-	}
-	wg.Wait()
-	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
-}
-
-func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
-	b.gssApiHandler = handler
-}
-
-func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
-	var (
-		bytesRead   int
-		lengthBytes = make([]byte, 4)
-	)
-
-	if _, err := io.ReadFull(r, lengthBytes); err != nil {
-		return nil, err
-	}
-
-	bytesRead += len(lengthBytes)
-	length := int32(binary.BigEndian.Uint32(lengthBytes))
-
-	if length <= 4 || length > MaxRequestSize {
-		return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
-	}
-
-	encodedReq := make([]byte, length)
-	if _, err := io.ReadFull(r, encodedReq); err != nil {
-		return nil, err
-	}
-
-	bytesRead += len(encodedReq)
-
-	fullBytes := append(lengthBytes, encodedReq...)
-
-	return fullBytes, nil
-}
-
-func (b *MockBroker) isGSSAPI(buffer []byte) bool {
-	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
-}
-
-func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
-	defer wg.Done()
-	defer func() {
-		_ = conn.Close()
-	}()
-	s := spew.NewDefaultConfig()
-	s.MaxDepth = 1
-	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
-	var err error
-
-	abort := make(chan none)
-	defer close(abort)
-	go func() {
-		select {
-		case <-b.closing:
-			_ = conn.Close()
-		case <-abort:
-		}
-	}()
-
-	var bytesWritten int
-	var bytesRead int
-	for {
-		buffer, err := b.readToBytes(conn)
-		if err != nil {
-			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
-			b.serverError(err)
-			break
-		}
-
-		bytesWritten = 0
-		if !b.isGSSAPI(buffer) {
-			req, br, err := decodeRequest(bytes.NewReader(buffer))
-			bytesRead = br
-			if err != nil {
-				Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
-				b.serverError(err)
-				break
-			}
-
-			if b.latency > 0 {
-				time.Sleep(b.latency)
-			}
-
-			b.lock.Lock()
-			res := b.handler(req)
-			b.history = append(b.history, RequestResponse{req.body, res})
-			b.lock.Unlock()
-
-			if res == nil {
-				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
-				continue
-			}
-			Logger.Printf(
-				"*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
-				b.brokerID, idx, req.body, res,
-				s.Sprintf("%#v", req.body),
-				s.Sprintf("%#v", res),
-			)
-
-			encodedRes, err := encode(res, nil)
-			if err != nil {
-				b.serverError(err)
-				break
-			}
-			if len(encodedRes) == 0 {
-				b.lock.Lock()
-				if b.notifier != nil {
-					b.notifier(bytesRead, 0)
-				}
-				b.lock.Unlock()
-				continue
-			}
-
-			resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
-			if _, err = conn.Write(resHeader); err != nil {
-				b.serverError(err)
-				break
-			}
-			if _, err = conn.Write(encodedRes); err != nil {
-				b.serverError(err)
-				break
-			}
-			bytesWritten = len(resHeader) + len(encodedRes)
-		} else {
-			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
-			// History is not supported for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
-			b.lock.Lock()
-			res := b.gssApiHandler(buffer)
-			b.lock.Unlock()
-			if res == nil {
-				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
-				continue
-			}
-			if _, err = conn.Write(res); err != nil {
-				b.serverError(err)
-				break
-			}
-			bytesWritten = len(res)
-		}
-
-		b.lock.Lock()
-		if b.notifier != nil {
-			b.notifier(bytesRead, bytesWritten)
-		}
-		b.lock.Unlock()
-	}
-	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
-}
-
-func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
-	headerLength := uint32(8)
-
-	if headerVersion >= 1 {
-		headerLength = 9
-	}
-
-	resHeader := make([]byte, headerLength)
-	binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
-	binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
-
-	if headerVersion >= 1 {
-		binary.PutUvarint(resHeader[8:], 0)
-	}
-
-	return resHeader
-}
-
-func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
-	select {
-	case res, ok := <-b.expectations:
-		if !ok {
-			return nil
-		}
-		return res
-	case <-time.After(expectationTimeout):
-		return nil
-	}
-}
-
-func (b *MockBroker) serverError(err error) {
-	isConnectionClosedError := false
-	if _, ok := err.(*net.OpError); ok {
-		isConnectionClosedError = true
-	} else if err == io.EOF {
-		isConnectionClosedError = true
-	} else if err.Error() == "use of closed network connection" {
-		isConnectionClosedError = true
-	}
-
-	if isConnectionClosedError {
-		return
-	}
-
-	b.t.Errorf(err.Error())
-}
-
-// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
-// test framework and a channel of responses to use.  If an error occurs it is
-// simply logged to the TestReporter and the broker exits.
-func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
-	return NewMockBrokerAddr(t, brokerID, "localhost:0")
-}
-
-// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
-// it rather than just some ephemeral port.
-func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
-	listener, err := net.Listen("tcp", addr)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return NewMockBrokerListener(t, brokerID, listener)
-}
-
-// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the specified listener.
-func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
-	var err error
-
-	broker := &MockBroker{
-		closing:      make(chan none),
-		stopper:      make(chan none),
-		t:            t,
-		brokerID:     brokerID,
-		expectations: make(chan encoderWithHeader, 512),
-		listener:     listener,
-	}
-	broker.handler = broker.defaultRequestHandler
-
-	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
-	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmp, err := strconv.ParseInt(portStr, 10, 32)
-	if err != nil {
-		t.Fatal(err)
-	}
-	broker.port = int32(tmp)
-
-	go broker.serverLoop()
-
-	return broker
-}
-
-func (b *MockBroker) Returns(e encoderWithHeader) {
-	b.expectations <- e
-}
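
Putting the MockBroker API above together with the response builders from mockresponses.go (the next file in this diff), a typical unit test looks roughly like this; the topic name is illustrative, and the default client config is assumed to need only metadata to be mocked:

package example

import (
	"testing"

	"github.com/IBM/sarama"
)

// TestClientAgainstMockBroker wires a MockBroker to answer MetadataRequests,
// then constructs a real client against it.
func TestClientAgainstMockBroker(t *testing.T) {
	mb := sarama.NewMockBroker(t, 1)
	defer mb.Close()

	mb.SetHandlerByMap(map[string]sarama.MockResponse{
		// Keys are request type names, as resolved via reflection in SetHandlerByMap.
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(mb.Addr(), mb.BrokerID()).
			SetLeader("my-topic", 0, mb.BrokerID()),
	})

	client, err := sarama.NewClient([]string{mb.Addr()}, sarama.NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}
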
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
deleted file mode 100644
index 6654ed0..0000000
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ /dev/null
@@ -1,1273 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"strings"
-)
-
-// TestReporter has methods matching go's testing.T to avoid importing
-// `testing` in the main part of the library.
-type TestReporter interface {
-	Error(...interface{})
-	Errorf(string, ...interface{})
-	Fatal(...interface{})
-	Fatalf(string, ...interface{})
-}
-
-// MockResponse is a response builder interface. It defines one method that
-// allows generating a response based on a request body. MockResponses are used
-// to program the behavior of MockBroker in tests.
-type MockResponse interface {
-	For(reqBody versionedDecoder) (res encoderWithHeader)
-}
-
-// MockWrapper is a mock response builder that returns a particular concrete
-// response regardless of the actual request passed to the `For` method.
-type MockWrapper struct {
-	res encoderWithHeader
-}
-
-func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
-	return mw.res
-}
-
-func NewMockWrapper(res encoderWithHeader) *MockWrapper {
-	return &MockWrapper{res: res}
-}
-
-// MockSequence is a mock response builder that is created from a sequence of
-// concrete responses. Every time a `MockBroker` calls its `For` method,
-// the next response from the sequence is returned. When the end of the
-// sequence is reached the last element from the sequence is returned.
-type MockSequence struct {
-	responses []MockResponse
-}
-
-func NewMockSequence(responses ...interface{}) *MockSequence {
-	ms := &MockSequence{}
-	ms.responses = make([]MockResponse, len(responses))
-	for i, res := range responses {
-		switch res := res.(type) {
-		case MockResponse:
-			ms.responses[i] = res
-		case encoderWithHeader:
-			ms.responses[i] = NewMockWrapper(res)
-		default:
-			panic(fmt.Sprintf("Unexpected response type: %T", res))
-		}
-	}
-	return ms
-}
-
-func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
-	res = mc.responses[0].For(reqBody)
-	if len(mc.responses) > 1 {
-		mc.responses = mc.responses[1:]
-	}
-	return res
-}
-
-type MockListGroupsResponse struct {
-	groups map[string]string
-	t      TestReporter
-}
-
-func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
-	return &MockListGroupsResponse{
-		groups: make(map[string]string),
-		t:      t,
-	}
-}
-
-func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	request := reqBody.(*ListGroupsRequest)
-	_ = request
-	response := &ListGroupsResponse{
-		Groups: m.groups,
-	}
-	return response
-}
-
-func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
-	m.groups[groupID] = protocolType
-	return m
-}
-
-type MockDescribeGroupsResponse struct {
-	groups map[string]*GroupDescription
-	t      TestReporter
-}
-
-func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
-	return &MockDescribeGroupsResponse{
-		t:      t,
-		groups: make(map[string]*GroupDescription),
-	}
-}
-
-func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
-	m.groups[groupID] = description
-	return m
-}
-
-func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	request := reqBody.(*DescribeGroupsRequest)
-
-	response := &DescribeGroupsResponse{}
-	for _, requestedGroup := range request.Groups {
-		if group, ok := m.groups[requestedGroup]; ok {
-			response.Groups = append(response.Groups, group)
-		} else {
-			// Mimic real kafka - if a group doesn't exist, return
-			// an entry with state "Dead"
-			response.Groups = append(response.Groups, &GroupDescription{
-				GroupId: requestedGroup,
-				State:   "Dead",
-			})
-		}
-	}
-
-	return response
-}
-
-// MockMetadataResponse is a `MetadataResponse` builder.
-type MockMetadataResponse struct {
-	controllerID int32
-	leaders      map[string]map[int32]int32
-	brokers      map[string]int32
-	t            TestReporter
-}
-
-func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
-	return &MockMetadataResponse{
-		leaders: make(map[string]map[int32]int32),
-		brokers: make(map[string]int32),
-		t:       t,
-	}
-}
-
-func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
-	partitions := mmr.leaders[topic]
-	if partitions == nil {
-		partitions = make(map[int32]int32)
-		mmr.leaders[topic] = partitions
-	}
-	partitions[partition] = brokerID
-	return mmr
-}
-
-func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
-	mmr.brokers[addr] = brokerID
-	return mmr
-}
-
-func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
-	mmr.controllerID = brokerID
-	return mmr
-}
-
-func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	metadataRequest := reqBody.(*MetadataRequest)
-	metadataResponse := &MetadataResponse{
-		Version:      metadataRequest.version(),
-		ControllerID: mmr.controllerID,
-	}
-	for addr, brokerID := range mmr.brokers {
-		metadataResponse.AddBroker(addr, brokerID)
-	}
-
-	// Generate set of replicas
-	var replicas []int32
-	var offlineReplicas []int32
-	for _, brokerID := range mmr.brokers {
-		replicas = append(replicas, brokerID)
-	}
-
-	if len(metadataRequest.Topics) == 0 {
-		for topic, partitions := range mmr.leaders {
-			for partition, brokerID := range partitions {
-				metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
-			}
-		}
-		return metadataResponse
-	}
-	for _, topic := range metadataRequest.Topics {
-		for partition, brokerID := range mmr.leaders[topic] {
-			metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
-		}
-	}
-	return metadataResponse
-}
-
-// MockOffsetResponse is an `OffsetResponse` builder.
-type MockOffsetResponse struct {
-	offsets map[string]map[int32]map[int64]int64
-	t       TestReporter
-	version int16
-}
-
-func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
-	return &MockOffsetResponse{
-		offsets: make(map[string]map[int32]map[int64]int64),
-		t:       t,
-	}
-}
-
-func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
-	mor.version = version
-	return mor
-}
-
-func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
-	partitions := mor.offsets[topic]
-	if partitions == nil {
-		partitions = make(map[int32]map[int64]int64)
-		mor.offsets[topic] = partitions
-	}
-	times := partitions[partition]
-	if times == nil {
-		times = make(map[int64]int64)
-		partitions[partition] = times
-	}
-	times[time] = offset
-	return mor
-}
-
-func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	offsetRequest := reqBody.(*OffsetRequest)
-	offsetResponse := &OffsetResponse{Version: mor.version}
-	for topic, partitions := range offsetRequest.blocks {
-		for partition, block := range partitions {
-			offset := mor.getOffset(topic, partition, block.time)
-			offsetResponse.AddTopicPartition(topic, partition, offset)
-		}
-	}
-	return offsetResponse
-}
-
-func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
-	partitions := mor.offsets[topic]
-	if partitions == nil {
-		mor.t.Errorf("missing topic: %s", topic)
-	}
-	times := partitions[partition]
-	if times == nil {
-		mor.t.Errorf("missing partition: %d", partition)
-	}
-	offset, ok := times[time]
-	if !ok {
-		mor.t.Errorf("missing time: %d", time)
-	}
-	return offset
-}
-
-// MockFetchResponse is a `FetchResponse` builder.
-type MockFetchResponse struct {
-	messages       map[string]map[int32]map[int64]Encoder
-	highWaterMarks map[string]map[int32]int64
-	t              TestReporter
-	batchSize      int
-	version        int16
-}
-
-func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
-	return &MockFetchResponse{
-		messages:       make(map[string]map[int32]map[int64]Encoder),
-		highWaterMarks: make(map[string]map[int32]int64),
-		t:              t,
-		batchSize:      batchSize,
-	}
-}
-
-func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
-	mfr.version = version
-	return mfr
-}
-
-func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
-	partitions := mfr.messages[topic]
-	if partitions == nil {
-		partitions = make(map[int32]map[int64]Encoder)
-		mfr.messages[topic] = partitions
-	}
-	messages := partitions[partition]
-	if messages == nil {
-		messages = make(map[int64]Encoder)
-		partitions[partition] = messages
-	}
-	messages[offset] = msg
-	return mfr
-}
-
-func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
-	partitions := mfr.highWaterMarks[topic]
-	if partitions == nil {
-		partitions = make(map[int32]int64)
-		mfr.highWaterMarks[topic] = partitions
-	}
-	partitions[partition] = offset
-	return mfr
-}
-
-func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	fetchRequest := reqBody.(*FetchRequest)
-	res := &FetchResponse{
-		Version: mfr.version,
-	}
-	for topic, partitions := range fetchRequest.blocks {
-		for partition, block := range partitions {
-			initialOffset := block.fetchOffset
-			offset := initialOffset
-			maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
-			for i := 0; i < mfr.batchSize && offset < maxOffset; {
-				msg := mfr.getMessage(topic, partition, offset)
-				if msg != nil {
-					res.AddMessage(topic, partition, nil, msg, offset)
-					i++
-				}
-				offset++
-			}
-			fb := res.GetBlock(topic, partition)
-			if fb == nil {
-				res.AddError(topic, partition, ErrNoError)
-				fb = res.GetBlock(topic, partition)
-			}
-			fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
-		}
-	}
-	return res
-}
-
-func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
-	partitions := mfr.messages[topic]
-	if partitions == nil {
-		return nil
-	}
-	messages := partitions[partition]
-	if messages == nil {
-		return nil
-	}
-	return messages[offset]
-}
-
-func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
-	partitions := mfr.messages[topic]
-	if partitions == nil {
-		return 0
-	}
-	messages := partitions[partition]
-	if messages == nil {
-		return 0
-	}
-	return len(messages)
-}
-
-func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
-	partitions := mfr.highWaterMarks[topic]
-	if partitions == nil {
-		return 0
-	}
-	return partitions[partition]
-}
-
-// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
-type MockConsumerMetadataResponse struct {
-	coordinators map[string]interface{}
-	t            TestReporter
-}
-
-func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
-	return &MockConsumerMetadataResponse{
-		coordinators: make(map[string]interface{}),
-		t:            t,
-	}
-}
-
-func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
-	mr.coordinators[group] = broker
-	return mr
-}
-
-func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
-	mr.coordinators[group] = kerror
-	return mr
-}
-
-func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*ConsumerMetadataRequest)
-	group := req.ConsumerGroup
-	res := &ConsumerMetadataResponse{}
-	v := mr.coordinators[group]
-	switch v := v.(type) {
-	case *MockBroker:
-		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
-	case KError:
-		res.Err = v
-	}
-	return res
-}
-
-// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
-type MockFindCoordinatorResponse struct {
-	groupCoordinators map[string]interface{}
-	transCoordinators map[string]interface{}
-	t                 TestReporter
-}
-
-func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
-	return &MockFindCoordinatorResponse{
-		groupCoordinators: make(map[string]interface{}),
-		transCoordinators: make(map[string]interface{}),
-		t:                 t,
-	}
-}
-
-func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
-	switch coordinatorType {
-	case CoordinatorGroup:
-		mr.groupCoordinators[group] = broker
-	case CoordinatorTransaction:
-		mr.transCoordinators[group] = broker
-	}
-	return mr
-}
-
-func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
-	switch coordinatorType {
-	case CoordinatorGroup:
-		mr.groupCoordinators[group] = kerror
-	case CoordinatorTransaction:
-		mr.transCoordinators[group] = kerror
-	}
-	return mr
-}
-
-func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*FindCoordinatorRequest)
-	res := &FindCoordinatorResponse{}
-	var v interface{}
-	switch req.CoordinatorType {
-	case CoordinatorGroup:
-		v = mr.groupCoordinators[req.CoordinatorKey]
-	case CoordinatorTransaction:
-		v = mr.transCoordinators[req.CoordinatorKey]
-	}
-	switch v := v.(type) {
-	case *MockBroker:
-		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
-	case KError:
-		res.Err = v
-	}
-	return res
-}
-
-// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
-type MockOffsetCommitResponse struct {
-	errors map[string]map[string]map[int32]KError
-	t      TestReporter
-}
-
-func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
-	return &MockOffsetCommitResponse{t: t}
-}
-
-func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
-	if mr.errors == nil {
-		mr.errors = make(map[string]map[string]map[int32]KError)
-	}
-	topics := mr.errors[group]
-	if topics == nil {
-		topics = make(map[string]map[int32]KError)
-		mr.errors[group] = topics
-	}
-	partitions := topics[topic]
-	if partitions == nil {
-		partitions = make(map[int32]KError)
-		topics[topic] = partitions
-	}
-	partitions[partition] = kerror
-	return mr
-}
-
-func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*OffsetCommitRequest)
-	group := req.ConsumerGroup
-	res := &OffsetCommitResponse{}
-	for topic, partitions := range req.blocks {
-		for partition := range partitions {
-			res.AddError(topic, partition, mr.getError(group, topic, partition))
-		}
-	}
-	return res
-}
-
-func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
-	topics := mr.errors[group]
-	if topics == nil {
-		return ErrNoError
-	}
-	partitions := topics[topic]
-	if partitions == nil {
-		return ErrNoError
-	}
-	kerror, ok := partitions[partition]
-	if !ok {
-		return ErrNoError
-	}
-	return kerror
-}
-
-// MockProduceResponse is a `ProduceResponse` builder.
-type MockProduceResponse struct {
-	version int16
-	errors  map[string]map[int32]KError
-	t       TestReporter
-}
-
-func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
-	return &MockProduceResponse{t: t}
-}
-
-func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
-	mr.version = version
-	return mr
-}
-
-func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
-	if mr.errors == nil {
-		mr.errors = make(map[string]map[int32]KError)
-	}
-	partitions := mr.errors[topic]
-	if partitions == nil {
-		partitions = make(map[int32]KError)
-		mr.errors[topic] = partitions
-	}
-	partitions[partition] = kerror
-	return mr
-}
-
-func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*ProduceRequest)
-	res := &ProduceResponse{
-		Version: mr.version,
-	}
-	for topic, partitions := range req.records {
-		for partition := range partitions {
-			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
-		}
-	}
-	return res
-}
-
-func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
-	partitions := mr.errors[topic]
-	if partitions == nil {
-		return ErrNoError
-	}
-	kerror, ok := partitions[partition]
-	if !ok {
-		return ErrNoError
-	}
-	return kerror
-}
-
-// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
-type MockOffsetFetchResponse struct {
-	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
-	error   KError
-	t       TestReporter
-}
-
-func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
-	return &MockOffsetFetchResponse{t: t}
-}
-
-func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
-	if mr.offsets == nil {
-		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
-	}
-	topics := mr.offsets[group]
-	if topics == nil {
-		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
-		mr.offsets[group] = topics
-	}
-	partitions := topics[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*OffsetFetchResponseBlock)
-		topics[topic] = partitions
-	}
-	partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
-	return mr
-}
-
-func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
-	mr.error = kerror
-	return mr
-}
-
-func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*OffsetFetchRequest)
-	group := req.ConsumerGroup
-	res := &OffsetFetchResponse{Version: req.Version}
-
-	for topic, partitions := range mr.offsets[group] {
-		for partition, block := range partitions {
-			res.AddBlock(topic, partition, block)
-		}
-	}
-
-	if res.Version >= 2 {
-		res.Err = mr.error
-	}
-	return res
-}
-
-type MockCreateTopicsResponse struct {
-	t TestReporter
-}
-
-func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
-	return &MockCreateTopicsResponse{t: t}
-}
-
-func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*CreateTopicsRequest)
-	res := &CreateTopicsResponse{
-		Version: req.Version,
-	}
-	res.TopicErrors = make(map[string]*TopicError)
-
-	for topic := range req.TopicDetails {
-		if res.Version >= 1 && strings.HasPrefix(topic, "_") {
-			msg := "insufficient permissions to create topic with reserved prefix"
-			res.TopicErrors[topic] = &TopicError{
-				Err:    ErrTopicAuthorizationFailed,
-				ErrMsg: &msg,
-			}
-			continue
-		}
-		res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
-	}
-	return res
-}
-
-type MockDeleteTopicsResponse struct {
-	t TestReporter
-}
-
-func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
-	return &MockDeleteTopicsResponse{t: t}
-}
-
-func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DeleteTopicsRequest)
-	res := &DeleteTopicsResponse{}
-	res.TopicErrorCodes = make(map[string]KError)
-
-	for _, topic := range req.Topics {
-		res.TopicErrorCodes[topic] = ErrNoError
-	}
-	res.Version = req.Version
-	return res
-}
-
-type MockCreatePartitionsResponse struct {
-	t TestReporter
-}
-
-func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
-	return &MockCreatePartitionsResponse{t: t}
-}
-
-func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*CreatePartitionsRequest)
-	res := &CreatePartitionsResponse{}
-	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
-
-	for topic := range req.TopicPartitions {
-		if strings.HasPrefix(topic, "_") {
-			msg := "insufficient permissions to create partition on topic with reserved prefix"
-			res.TopicPartitionErrors[topic] = &TopicPartitionError{
-				Err:    ErrTopicAuthorizationFailed,
-				ErrMsg: &msg,
-			}
-			continue
-		}
-		res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
-	}
-	return res
-}
-
-type MockAlterPartitionReassignmentsResponse struct {
-	t TestReporter
-}
-
-func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
-	return &MockAlterPartitionReassignmentsResponse{t: t}
-}
-
-func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*AlterPartitionReassignmentsRequest)
-	_ = req
-	res := &AlterPartitionReassignmentsResponse{}
-	return res
-}
-
-type MockListPartitionReassignmentsResponse struct {
-	t TestReporter
-}
-
-func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
-	return &MockListPartitionReassignmentsResponse{t: t}
-}
-
-func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*ListPartitionReassignmentsRequest)
-	_ = req
-	res := &ListPartitionReassignmentsResponse{}
-
-	for topic, partitions := range req.blocks {
-		for _, partition := range partitions {
-			res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
-		}
-	}
-
-	return res
-}
-
-type MockDeleteRecordsResponse struct {
-	t TestReporter
-}
-
-func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
-	return &MockDeleteRecordsResponse{t: t}
-}
-
-func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DeleteRecordsRequest)
-	res := &DeleteRecordsResponse{}
-	res.Topics = make(map[string]*DeleteRecordsResponseTopic)
-
-	for topic, deleteRecordRequestTopic := range req.Topics {
-		partitions := make(map[int32]*DeleteRecordsResponsePartition)
-		for partition := range deleteRecordRequestTopic.PartitionOffsets {
-			partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
-		}
-		res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
-	}
-	return res
-}
-
-type MockDescribeConfigsResponse struct {
-	t TestReporter
-}
-
-func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
-	return &MockDescribeConfigsResponse{t: t}
-}
-
-func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DescribeConfigsRequest)
-	res := &DescribeConfigsResponse{
-		Version: req.Version,
-	}
-
-	includeSynonyms := req.Version > 0
-	includeSource := req.Version > 0
-
-	for _, r := range req.Resources {
-		var configEntries []*ConfigEntry
-		switch r.Type {
-		case BrokerResource:
-			configEntries = append(configEntries,
-				&ConfigEntry{
-					Name:     "min.insync.replicas",
-					Value:    "2",
-					ReadOnly: false,
-					Default:  false,
-				},
-			)
-			res.Resources = append(res.Resources, &ResourceResponse{
-				Name:    r.Name,
-				Configs: configEntries,
-			})
-		case BrokerLoggerResource:
-			configEntries = append(configEntries,
-				&ConfigEntry{
-					Name:     "kafka.controller.KafkaController",
-					Value:    "DEBUG",
-					ReadOnly: false,
-					Default:  false,
-				},
-			)
-			res.Resources = append(res.Resources, &ResourceResponse{
-				Name:    r.Name,
-				Configs: configEntries,
-			})
-		case TopicResource:
-			maxMessageBytes := &ConfigEntry{
-				Name:      "max.message.bytes",
-				Value:     "1000000",
-				ReadOnly:  false,
-				Default:   !includeSource,
-				Sensitive: false,
-			}
-			if includeSource {
-				maxMessageBytes.Source = SourceDefault
-			}
-			if includeSynonyms {
-				maxMessageBytes.Synonyms = []*ConfigSynonym{
-					{
-						ConfigName:  "max.message.bytes",
-						ConfigValue: "500000",
-					},
-				}
-			}
-			retentionMs := &ConfigEntry{
-				Name:      "retention.ms",
-				Value:     "5000",
-				ReadOnly:  false,
-				Default:   false,
-				Sensitive: false,
-			}
-			if includeSynonyms {
-				retentionMs.Synonyms = []*ConfigSynonym{
-					{
-						ConfigName:  "log.retention.ms",
-						ConfigValue: "2500",
-					},
-				}
-			}
-			password := &ConfigEntry{
-				Name:      "password",
-				Value:     "12345",
-				ReadOnly:  false,
-				Default:   false,
-				Sensitive: true,
-			}
-			configEntries = append(
-				configEntries, maxMessageBytes, retentionMs, password)
-			res.Resources = append(res.Resources, &ResourceResponse{
-				Name:    r.Name,
-				Configs: configEntries,
-			})
-		}
-	}
-	return res
-}
-
-type MockDescribeConfigsResponseWithErrorCode struct {
-	t TestReporter
-}
-
-func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
-	return &MockDescribeConfigsResponseWithErrorCode{t: t}
-}
-
-func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DescribeConfigsRequest)
-	res := &DescribeConfigsResponse{
-		Version: req.Version,
-	}
-
-	for _, r := range req.Resources {
-		res.Resources = append(res.Resources, &ResourceResponse{
-			Name:      r.Name,
-			Type:      r.Type,
-			ErrorCode: 83,
-			ErrorMsg:  "",
-		})
-	}
-	return res
-}
-
-type MockAlterConfigsResponse struct {
-	t TestReporter
-}
-
-func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
-	return &MockAlterConfigsResponse{t: t}
-}
-
-func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*AlterConfigsRequest)
-	res := &AlterConfigsResponse{}
-
-	for _, r := range req.Resources {
-		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
-			Name:     r.Name,
-			Type:     r.Type,
-			ErrorMsg: "",
-		})
-	}
-	return res
-}
-
-type MockAlterConfigsResponseWithErrorCode struct {
-	t TestReporter
-}
-
-func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
-	return &MockAlterConfigsResponseWithErrorCode{t: t}
-}
-
-func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*AlterConfigsRequest)
-	res := &AlterConfigsResponse{}
-
-	for _, r := range req.Resources {
-		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
-			Name:      r.Name,
-			Type:      r.Type,
-			ErrorCode: 83,
-			ErrorMsg:  "",
-		})
-	}
-	return res
-}
-
-type MockCreateAclsResponse struct {
-	t TestReporter
-}
-
-func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
-	return &MockCreateAclsResponse{t: t}
-}
-
-func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*CreateAclsRequest)
-	res := &CreateAclsResponse{}
-
-	for range req.AclCreations {
-		res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
-	}
-	return res
-}
-
-type MockListAclsResponse struct {
-	t TestReporter
-}
-
-func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
-	return &MockListAclsResponse{t: t}
-}
-
-func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DescribeAclsRequest)
-	res := &DescribeAclsResponse{}
-	res.Err = ErrNoError
-	acl := &ResourceAcls{}
-	if req.ResourceName != nil {
-		acl.Resource.ResourceName = *req.ResourceName
-	}
-	acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
-	acl.Resource.ResourceType = req.ResourceType
-
-	host := "*"
-	if req.Host != nil {
-		host = *req.Host
-	}
-
-	principal := "User:test"
-	if req.Principal != nil {
-		principal = *req.Principal
-	}
-
-	permissionType := req.PermissionType
-	if permissionType == AclPermissionAny {
-		permissionType = AclPermissionAllow
-	}
-
-	acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
-	res.ResourceAcls = append(res.ResourceAcls, acl)
-	res.Version = int16(req.Version)
-	return res
-}
-
-type MockSaslAuthenticateResponse struct {
-	t             TestReporter
-	kerror        KError
-	saslAuthBytes []byte
-}
-
-func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
-	return &MockSaslAuthenticateResponse{t: t}
-}
-
-func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	res := &SaslAuthenticateResponse{}
-	res.Err = msar.kerror
-	res.SaslAuthBytes = msar.saslAuthBytes
-	return res
-}
-
-func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
-	msar.kerror = kerror
-	return msar
-}
-
-func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
-	msar.saslAuthBytes = saslAuthBytes
-	return msar
-}
-
-type MockDeleteAclsResponse struct {
-	t TestReporter
-}
-
-type MockSaslHandshakeResponse struct {
-	enabledMechanisms []string
-	kerror            KError
-	t                 TestReporter
-}
-
-func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
-	return &MockSaslHandshakeResponse{t: t}
-}
-
-func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	res := &SaslHandshakeResponse{}
-	res.Err = mshr.kerror
-	res.EnabledMechanisms = mshr.enabledMechanisms
-	return res
-}
-
-func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
-	mshr.kerror = kerror
-	return mshr
-}
-
-func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
-	mshr.enabledMechanisms = enabledMechanisms
-	return mshr
-}
-
-func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
-	return &MockDeleteAclsResponse{t: t}
-}
-
-func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DeleteAclsRequest)
-	res := &DeleteAclsResponse{}
-
-	for range req.Filters {
-		response := &FilterResponse{Err: ErrNoError}
-		response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
-		res.FilterResponses = append(res.FilterResponses, response)
-	}
-	res.Version = int16(req.Version)
-	return res
-}
-
-type MockDeleteGroupsResponse struct {
-	deletedGroups []string
-}
-
-func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
-	return &MockDeleteGroupsResponse{}
-}
-
-func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
-	m.deletedGroups = groups
-	return m
-}
-
-func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &DeleteGroupsResponse{
-		GroupErrorCodes: map[string]KError{},
-	}
-	for _, group := range m.deletedGroups {
-		resp.GroupErrorCodes[group] = ErrNoError
-	}
-	return resp
-}
-
-type MockJoinGroupResponse struct {
-	t TestReporter
-
-	ThrottleTime  int32
-	Err           KError
-	GenerationId  int32
-	GroupProtocol string
-	LeaderId      string
-	MemberId      string
-	Members       map[string][]byte
-}
-
-func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
-	return &MockJoinGroupResponse{
-		t:       t,
-		Members: make(map[string][]byte),
-	}
-}
-
-func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*JoinGroupRequest)
-	resp := &JoinGroupResponse{
-		Version:       req.Version,
-		ThrottleTime:  m.ThrottleTime,
-		Err:           m.Err,
-		GenerationId:  m.GenerationId,
-		GroupProtocol: m.GroupProtocol,
-		LeaderId:      m.LeaderId,
-		MemberId:      m.MemberId,
-		Members:       m.Members,
-	}
-	return resp
-}
-
-func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
-	m.ThrottleTime = t
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
-	m.Err = kerr
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
-	m.GenerationId = id
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
-	m.GroupProtocol = proto
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
-	m.LeaderId = id
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
-	m.MemberId = id
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
-	bin, err := encode(meta, nil)
-	if err != nil {
-		panic(fmt.Sprintf("error encoding member metadata: %v", err))
-	}
-	m.Members[id] = bin
-	return m
-}
-
-type MockLeaveGroupResponse struct {
-	t TestReporter
-
-	Err KError
-}
-
-func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
-	return &MockLeaveGroupResponse{t: t}
-}
-
-func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &LeaveGroupResponse{
-		Err: m.Err,
-	}
-	return resp
-}
-
-func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
-	m.Err = kerr
-	return m
-}
-
-type MockSyncGroupResponse struct {
-	t TestReporter
-
-	Err              KError
-	MemberAssignment []byte
-}
-
-func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
-	return &MockSyncGroupResponse{t: t}
-}
-
-func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &SyncGroupResponse{
-		Err:              m.Err,
-		MemberAssignment: m.MemberAssignment,
-	}
-	return resp
-}
-
-func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
-	m.Err = kerr
-	return m
-}
-
-func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
-	bin, err := encode(assignment, nil)
-	if err != nil {
-		panic(fmt.Sprintf("error encoding member assignment: %v", err))
-	}
-	m.MemberAssignment = bin
-	return m
-}
-
-type MockHeartbeatResponse struct {
-	t TestReporter
-
-	Err KError
-}
-
-func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
-	return &MockHeartbeatResponse{t: t}
-}
-
-func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &HeartbeatResponse{Err: m.Err}
-	return resp
-}
-
-func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
-	m.Err = kerr
-	return m
-}
-
-type MockDescribeLogDirsResponse struct {
-	t       TestReporter
-	logDirs []DescribeLogDirsResponseDirMetadata
-}
-
-func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
-	return &MockDescribeLogDirsResponse{t: t}
-}
-
-func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
-	var topics []DescribeLogDirsResponseTopic
-	for topic := range topicPartitions {
-		var partitions []DescribeLogDirsResponsePartition
-		for i := 0; i < topicPartitions[topic]; i++ {
-			partitions = append(partitions, DescribeLogDirsResponsePartition{
-				PartitionID: int32(i),
-				IsTemporary: false,
-				OffsetLag:   int64(0),
-				Size:        int64(1234),
-			})
-		}
-		topics = append(topics, DescribeLogDirsResponseTopic{
-			Topic:      topic,
-			Partitions: partitions,
-		})
-	}
-	logDir := DescribeLogDirsResponseDirMetadata{
-		ErrorCode: ErrNoError,
-		Path:      logDirPath,
-		Topics:    topics,
-	}
-	m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
-	return m
-}
-
-func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &DescribeLogDirsResponse{
-		LogDirs: m.logDirs,
-	}
-	return resp
-}
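For context on how these canned responses are meant to be consumed: a minimal, illustrative in-package test that wires a few of them into a MockBroker keyed by request type name. The test name, topic, mechanism and auth bytes are placeholders, and the client setup is omitted; this is a sketch, not the package's own test code.

// Illustrative only: register canned responses on a MockBroker; keys are the
// request type names, values are the Mock*Response builders defined above.
func TestSaslFlowAgainstMockBroker(t *testing.T) {
	mockBroker := NewMockBroker(t, 1)
	defer mockBroker.Close()

	mockBroker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest":         NewMockMetadataResponse(t).SetBroker(mockBroker.Addr(), mockBroker.BrokerID()),
		"SaslHandshakeRequest":    NewMockSaslHandshakeResponse(t).SetEnabledMechanisms([]string{"PLAIN"}),
		"SaslAuthenticateRequest": NewMockSaslAuthenticateResponse(t).SetAuthBytes([]byte("ok")),
	})

	// A client pointed at mockBroker.Addr() with SASL/PLAIN enabled would now
	// receive these responses; the client construction itself is omitted here.
}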
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
deleted file mode 100644
index 9931cad..0000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_request.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package sarama
-
-import "errors"
-
-// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
-// tells the broker to set the timestamp to the time at which the request was received.
-// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
-const ReceiveTime int64 = -1
-
-// GroupGenerationUndefined is a special value for the group generation field of
-// Offset Commit Requests that should be used when a consumer group does not rely
-// on Kafka for partition management.
-const GroupGenerationUndefined = -1
-
-type offsetCommitRequestBlock struct {
-	offset    int64
-	timestamp int64
-	metadata  string
-}
-
-func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
-	pe.putInt64(b.offset)
-	if version == 1 {
-		pe.putInt64(b.timestamp)
-	} else if b.timestamp != 0 {
-		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
-	}
-
-	return pe.putString(b.metadata)
-}
-
-func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
-	if b.offset, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if version == 1 {
-		if b.timestamp, err = pd.getInt64(); err != nil {
-			return err
-		}
-	}
-	b.metadata, err = pd.getString()
-	return err
-}
-
-type OffsetCommitRequest struct {
-	ConsumerGroup           string
-	ConsumerGroupGeneration int32  // v1 or later
-	ConsumerID              string // v1 or later
-	RetentionTime           int64  // v2 or later
-
-	// Version can be:
-	// - 0 (kafka 0.8.1 and later)
-	// - 1 (kafka 0.8.2 and later)
-	// - 2 (kafka 0.9.0 and later)
-	// - 3 (kafka 0.11.0 and later)
-	// - 4 (kafka 2.0.0 and later)
-	Version int16
-	blocks  map[string]map[int32]*offsetCommitRequestBlock
-}
-
-func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
-	if r.Version < 0 || r.Version > 4 {
-		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
-	}
-
-	if err := pe.putString(r.ConsumerGroup); err != nil {
-		return err
-	}
-
-	if r.Version >= 1 {
-		pe.putInt32(r.ConsumerGroupGeneration)
-		if err := pe.putString(r.ConsumerID); err != nil {
-			return err
-		}
-	} else {
-		if r.ConsumerGroupGeneration != 0 {
-			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
-		}
-		if r.ConsumerID != "" {
-			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
-		}
-	}
-
-	if r.Version >= 2 {
-		pe.putInt64(r.RetentionTime)
-	} else if r.RetentionTime != 0 {
-		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
-	}
-
-	if err := pe.putArrayLength(len(r.blocks)); err != nil {
-		return err
-	}
-	for topic, partitions := range r.blocks {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err := block.encode(pe, r.Version); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.ConsumerGroup, err = pd.getString(); err != nil {
-		return err
-	}
-
-	if r.Version >= 1 {
-		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
-			return err
-		}
-		if r.ConsumerID, err = pd.getString(); err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 2 {
-		if r.RetentionTime, err = pd.getInt64(); err != nil {
-			return err
-		}
-	}
-
-	topicCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount == 0 {
-		return nil
-	}
-	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
-	for i := 0; i < topicCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			block := &offsetCommitRequestBlock{}
-			if err := block.decode(pd, r.Version); err != nil {
-				return err
-			}
-			r.blocks[topic][partition] = block
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitRequest) key() int16 {
-	return 8
-}
-
-func (r *OffsetCommitRequest) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetCommitRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_9_0_0
-	case 3:
-		return V0_11_0_0
-	case 4:
-		return V2_0_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
-	}
-
-	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
-}
-
-func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
-	partitions := r.blocks[topic]
-	if partitions == nil {
-		return 0, "", errors.New("no such offset")
-	}
-	block := partitions[partitionID]
-	if block == nil {
-		return 0, "", errors.New("no such offset")
-	}
-	return block.offset, block.metadata, nil
-}
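To make the two sentinel constants above concrete, a small illustrative helper (group, topic, offset and metadata values are placeholders) that stages a commit for a consumer which does not use Kafka-based group management:

// Illustrative sketch: a v1 commit using ReceiveTime so the broker stamps the
// commit with its own receive time, and GroupGenerationUndefined because no
// Kafka-managed generation exists for this group.
func buildStandaloneCommit() (*OffsetCommitRequest, error) {
	req := &OffsetCommitRequest{
		Version:                 1,
		ConsumerGroup:           "my-group",
		ConsumerGroupGeneration: GroupGenerationUndefined,
	}
	req.AddBlock("my-topic", 0, 42, ReceiveTime, "checkpoint-metadata")

	// Offset() reads back what AddBlock staged, or errors if nothing was added.
	if _, _, err := req.Offset("my-topic", 0); err != nil {
		return nil, err
	}
	return req, nil
}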
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
deleted file mode 100644
index 342260e..0000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_response.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sarama
-
-type OffsetCommitResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	Errors         map[string]map[int32]KError
-}
-
-func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
-	if r.Errors == nil {
-		r.Errors = make(map[string]map[int32]KError)
-	}
-	partitions := r.Errors[topic]
-	if partitions == nil {
-		partitions = make(map[int32]KError)
-		r.Errors[topic] = partitions
-	}
-	partitions[partition] = kerror
-}
-
-func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
-	if r.Version >= 3 {
-		pe.putInt32(r.ThrottleTimeMs)
-	}
-	if err := pe.putArrayLength(len(r.Errors)); err != nil {
-		return err
-	}
-	for topic, partitions := range r.Errors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, kerror := range partitions {
-			pe.putInt32(partition)
-			pe.putInt16(int16(kerror))
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if version >= 3 {
-		r.ThrottleTimeMs, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	numTopics, err := pd.getArrayLength()
-	if err != nil || numTopics == 0 {
-		return err
-	}
-
-	r.Errors = make(map[string]map[int32]KError, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numErrors, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Errors[name] = make(map[int32]KError, numErrors)
-
-		for j := 0; j < numErrors; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			tmp, err := pd.getInt16()
-			if err != nil {
-				return err
-			}
-			r.Errors[name][id] = KError(tmp)
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetCommitResponse) key() int16 {
-	return 8
-}
-
-func (r *OffsetCommitResponse) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetCommitResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_9_0_0
-	case 3:
-		return V0_11_0_0
-	case 4:
-		return V2_0_0_0
-	default:
-		return MinVersion
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
deleted file mode 100644
index 7e147eb..0000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package sarama
-
-type OffsetFetchRequest struct {
-	Version       int16
-	ConsumerGroup string
-	RequireStable bool // requires v7+
-	partitions    map[string][]int32
-}
-
-func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
-	if r.Version < 0 || r.Version > 7 {
-		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
-	}
-
-	isFlexible := r.Version >= 6
-
-	if isFlexible {
-		err = pe.putCompactString(r.ConsumerGroup)
-	} else {
-		err = pe.putString(r.ConsumerGroup)
-	}
-	if err != nil {
-		return err
-	}
-
-	if isFlexible {
-		if r.partitions == nil {
-			pe.putUVarint(0)
-		} else {
-			pe.putCompactArrayLength(len(r.partitions))
-		}
-	} else {
-		if r.partitions == nil && r.Version >= 2 {
-			pe.putInt32(-1)
-		} else {
-			if err = pe.putArrayLength(len(r.partitions)); err != nil {
-				return err
-			}
-		}
-	}
-
-	for topic, partitions := range r.partitions {
-		if isFlexible {
-			err = pe.putCompactString(topic)
-		} else {
-			err = pe.putString(topic)
-		}
-		if err != nil {
-			return err
-		}
-
-		//
-
-		if isFlexible {
-			err = pe.putCompactInt32Array(partitions)
-		} else {
-			err = pe.putInt32Array(partitions)
-		}
-		if err != nil {
-			return err
-		}
-
-		if isFlexible {
-			pe.putEmptyTaggedFieldArray()
-		}
-	}
-
-	if r.RequireStable && r.Version < 7 {
-		return PacketEncodingError{"requireStable is not supported. use version 7 or later"}
-	}
-
-	if r.Version >= 7 {
-		pe.putBool(r.RequireStable)
-	}
-
-	if isFlexible {
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	return nil
-}
-
-func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-	isFlexible := r.Version >= 6
-	if isFlexible {
-		r.ConsumerGroup, err = pd.getCompactString()
-	} else {
-		r.ConsumerGroup, err = pd.getString()
-	}
-	if err != nil {
-		return err
-	}
-
-	var partitionCount int
-
-	if isFlexible {
-		partitionCount, err = pd.getCompactArrayLength()
-	} else {
-		partitionCount, err = pd.getArrayLength()
-	}
-	if err != nil {
-		return err
-	}
-
-	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
-		return nil
-	}
-
-	r.partitions = make(map[string][]int32, partitionCount)
-	for i := 0; i < partitionCount; i++ {
-		var topic string
-		if isFlexible {
-			topic, err = pd.getCompactString()
-		} else {
-			topic, err = pd.getString()
-		}
-		if err != nil {
-			return err
-		}
-
-		var partitions []int32
-		if isFlexible {
-			partitions, err = pd.getCompactInt32Array()
-		} else {
-			partitions, err = pd.getInt32Array()
-		}
-		if err != nil {
-			return err
-		}
-		if isFlexible {
-			_, err = pd.getEmptyTaggedFieldArray()
-			if err != nil {
-				return err
-			}
-		}
-
-		r.partitions[topic] = partitions
-	}
-
-	if r.Version >= 7 {
-		r.RequireStable, err = pd.getBool()
-		if err != nil {
-			return err
-		}
-	}
-
-	if isFlexible {
-		_, err = pd.getEmptyTaggedFieldArray()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetFetchRequest) key() int16 {
-	return 9
-}
-
-func (r *OffsetFetchRequest) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetFetchRequest) headerVersion() int16 {
-	if r.Version >= 6 {
-		return 2
-	}
-
-	return 1
-}
-
-func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_10_2_0
-	case 3:
-		return V0_11_0_0
-	case 4:
-		return V2_0_0_0
-	case 5:
-		return V2_1_0_0
-	case 6:
-		return V2_4_0_0
-	case 7:
-		return V2_5_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *OffsetFetchRequest) ZeroPartitions() {
-	if r.partitions == nil && r.Version >= 2 {
-		r.partitions = make(map[string][]int32)
-	}
-}
-
-func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
-	if r.partitions == nil {
-		r.partitions = make(map[string][]int32)
-	}
-
-	r.partitions[topic] = append(r.partitions[topic], partitionID)
-}
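A short illustrative sketch of building a fetch for committed offsets (names are placeholders); note that encode() rejects RequireStable unless Version is at least 7:

// Illustrative sketch: ask for the committed offset of one partition and
// require a transaction-stable answer (v7+ only).
func buildOffsetFetch() *OffsetFetchRequest {
	req := &OffsetFetchRequest{
		Version:       7,
		ConsumerGroup: "my-group",
		RequireStable: true,
	}
	req.AddPartition("my-topic", 0)
	// On v2+ a request with no partitions added encodes the partition set as -1,
	// which the broker interprets as "all topics committed by this group".
	return req
}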
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
deleted file mode 100644
index 1944922..0000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go
+++ /dev/null
@@ -1,280 +0,0 @@
-package sarama
-
-type OffsetFetchResponseBlock struct {
-	Offset      int64
-	LeaderEpoch int32
-	Metadata    string
-	Err         KError
-}
-
-func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
-	isFlexible := version >= 6
-
-	b.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	if version >= 5 {
-		b.LeaderEpoch, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	if isFlexible {
-		b.Metadata, err = pd.getCompactString()
-	} else {
-		b.Metadata, err = pd.getString()
-	}
-	if err != nil {
-		return err
-	}
-
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	if isFlexible {
-		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
-	isFlexible := version >= 6
-	pe.putInt64(b.Offset)
-
-	if version >= 5 {
-		pe.putInt32(b.LeaderEpoch)
-	}
-	if isFlexible {
-		err = pe.putCompactString(b.Metadata)
-	} else {
-		err = pe.putString(b.Metadata)
-	}
-	if err != nil {
-		return err
-	}
-
-	pe.putInt16(int16(b.Err))
-
-	if isFlexible {
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	return nil
-}
-
-type OffsetFetchResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	Blocks         map[string]map[int32]*OffsetFetchResponseBlock
-	Err            KError
-}
-
-func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) {
-	isFlexible := r.Version >= 6
-
-	if r.Version >= 3 {
-		pe.putInt32(r.ThrottleTimeMs)
-	}
-	if isFlexible {
-		pe.putCompactArrayLength(len(r.Blocks))
-	} else {
-		err = pe.putArrayLength(len(r.Blocks))
-	}
-	if err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.Blocks {
-		if isFlexible {
-			err = pe.putCompactString(topic)
-		} else {
-			err = pe.putString(topic)
-		}
-		if err != nil {
-			return err
-		}
-
-		if isFlexible {
-			pe.putCompactArrayLength(len(partitions))
-		} else {
-			err = pe.putArrayLength(len(partitions))
-		}
-		if err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err := block.encode(pe, r.Version); err != nil {
-				return err
-			}
-		}
-		if isFlexible {
-			pe.putEmptyTaggedFieldArray()
-		}
-	}
-	if r.Version >= 2 {
-		pe.putInt16(int16(r.Err))
-	}
-	if isFlexible {
-		pe.putEmptyTaggedFieldArray()
-	}
-	return nil
-}
-
-func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-	isFlexible := version >= 6
-
-	if version >= 3 {
-		r.ThrottleTimeMs, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	var numTopics int
-	if isFlexible {
-		numTopics, err = pd.getCompactArrayLength()
-	} else {
-		numTopics, err = pd.getArrayLength()
-	}
-	if err != nil {
-		return err
-	}
-
-	if numTopics > 0 {
-		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
-		for i := 0; i < numTopics; i++ {
-			var name string
-			if isFlexible {
-				name, err = pd.getCompactString()
-			} else {
-				name, err = pd.getString()
-			}
-			if err != nil {
-				return err
-			}
-
-			var numBlocks int
-			if isFlexible {
-				numBlocks, err = pd.getCompactArrayLength()
-			} else {
-				numBlocks, err = pd.getArrayLength()
-			}
-			if err != nil {
-				return err
-			}
-
-			r.Blocks[name] = nil
-			if numBlocks > 0 {
-				r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
-			}
-			for j := 0; j < numBlocks; j++ {
-				id, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-
-				block := new(OffsetFetchResponseBlock)
-				err = block.decode(pd, version)
-				if err != nil {
-					return err
-				}
-
-				r.Blocks[name][id] = block
-			}
-
-			if isFlexible {
-				if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	if version >= 2 {
-		kerr, err := pd.getInt16()
-		if err != nil {
-			return err
-		}
-		r.Err = KError(kerr)
-	}
-
-	if isFlexible {
-		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetFetchResponse) key() int16 {
-	return 9
-}
-
-func (r *OffsetFetchResponse) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetFetchResponse) headerVersion() int16 {
-	if r.Version >= 6 {
-		return 1
-	}
-
-	return 0
-}
-
-func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_10_2_0
-	case 3:
-		return V0_11_0_0
-	case 4:
-		return V2_0_0_0
-	case 5:
-		return V2_1_0_0
-	case 6:
-		return V2_4_0_0
-	case 7:
-		return V2_5_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
-	}
-	partitions := r.Blocks[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*OffsetFetchResponseBlock)
-		r.Blocks[topic] = partitions
-	}
-	partitions[partition] = block
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
deleted file mode 100644
index 4f480a0..0000000
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ /dev/null
@@ -1,593 +0,0 @@
-package sarama
-
-import (
-	"sync"
-	"time"
-)
-
-// Offset Manager
-
-// OffsetManager uses Kafka to store and fetch consumed partition offsets.
-type OffsetManager interface {
-	// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
-	// It will return an error if this OffsetManager is already managing the given
-	// topic/partition.
-	ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
-
-	// Close stops the OffsetManager from managing offsets. It is required to call
-	// this function before an OffsetManager object passes out of scope, as it
-	// will otherwise leak memory. You must call this after all the
-	// PartitionOffsetManagers are closed.
-	Close() error
-
-	// Commit commits the offsets. This method can be used if AutoCommit.Enable is
-	// set to false.
-	Commit()
-}
-
-type offsetManager struct {
-	client Client
-	conf   *Config
-	group  string
-	ticker *time.Ticker
-
-	memberID   string
-	generation int32
-
-	broker     *Broker
-	brokerLock sync.RWMutex
-
-	poms     map[string]map[int32]*partitionOffsetManager
-	pomsLock sync.RWMutex
-
-	closeOnce sync.Once
-	closing   chan none
-	closed    chan none
-}
-
-// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
-// It is still necessary to call Close() on the underlying client when finished with the partition manager.
-func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
-	return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client)
-}
-
-func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) {
-	// Check that we are not dealing with a closed Client before processing any other arguments
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	conf := client.Config()
-	om := &offsetManager{
-		client: client,
-		conf:   conf,
-		group:  group,
-		poms:   make(map[string]map[int32]*partitionOffsetManager),
-
-		memberID:   memberID,
-		generation: generation,
-
-		closing: make(chan none),
-		closed:  make(chan none),
-	}
-	if conf.Consumer.Offsets.AutoCommit.Enable {
-		om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
-		go withRecover(om.mainLoop)
-	}
-
-	return om, nil
-}
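A rough end-to-end sketch of the lifecycle described by the interface comments above (the Client is assumed to exist already, the group and topic names are placeholders, and error handling around consumption is omitted):

// Illustrative sketch: manual offset management for one partition, honouring
// the documented shutdown order (partition offset managers first, then the
// OffsetManager, and finally the Client elsewhere).
func manageOffsets(client Client) error {
	om, err := NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		return err
	}

	pom, err := om.ManagePartition("my-topic", 0)
	if err != nil {
		_ = om.Close()
		return err
	}

	offset, _ := pom.NextOffset()
	// ... consume starting at offset, then record progress ...
	pom.MarkOffset(offset+1, "")
	om.Commit() // immediate flush; also happens periodically when AutoCommit is enabled

	pom.AsyncClose()
	return om.Close() // performs a final flush and releases the partition manager
}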
-
-func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
-	pom, err := om.newPartitionOffsetManager(topic, partition)
-	if err != nil {
-		return nil, err
-	}
-
-	om.pomsLock.Lock()
-	defer om.pomsLock.Unlock()
-
-	topicManagers := om.poms[topic]
-	if topicManagers == nil {
-		topicManagers = make(map[int32]*partitionOffsetManager)
-		om.poms[topic] = topicManagers
-	}
-
-	if topicManagers[partition] != nil {
-		return nil, ConfigurationError("That topic/partition is already being managed")
-	}
-
-	topicManagers[partition] = pom
-	return pom, nil
-}
-
-func (om *offsetManager) Close() error {
-	om.closeOnce.Do(func() {
-		// exit the mainLoop
-		close(om.closing)
-		if om.conf.Consumer.Offsets.AutoCommit.Enable {
-			<-om.closed
-		}
-
-		// mark all POMs as closed
-		om.asyncClosePOMs()
-
-		// flush one last time
-		if om.conf.Consumer.Offsets.AutoCommit.Enable {
-			for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
-				om.flushToBroker()
-				if om.releasePOMs(false) == 0 {
-					break
-				}
-			}
-		}
-
-		om.releasePOMs(true)
-		om.brokerLock.Lock()
-		om.broker = nil
-		om.brokerLock.Unlock()
-	})
-	return nil
-}
-
-func (om *offsetManager) computeBackoff(retries int) time.Duration {
-	if om.conf.Metadata.Retry.BackoffFunc != nil {
-		return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
-	} else {
-		return om.conf.Metadata.Retry.Backoff
-	}
-}
-
-func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
-	broker, err := om.coordinator()
-	if err != nil {
-		if retries <= 0 {
-			return 0, "", err
-		}
-		return om.fetchInitialOffset(topic, partition, retries-1)
-	}
-
-	req := new(OffsetFetchRequest)
-	req.Version = 1
-	req.ConsumerGroup = om.group
-	req.AddPartition(topic, partition)
-
-	resp, err := broker.FetchOffset(req)
-	if err != nil {
-		if retries <= 0 {
-			return 0, "", err
-		}
-		om.releaseCoordinator(broker)
-		return om.fetchInitialOffset(topic, partition, retries-1)
-	}
-
-	block := resp.GetBlock(topic, partition)
-	if block == nil {
-		return 0, "", ErrIncompleteResponse
-	}
-
-	switch block.Err {
-	case ErrNoError:
-		return block.Offset, block.Metadata, nil
-	case ErrNotCoordinatorForConsumer:
-		if retries <= 0 {
-			return 0, "", block.Err
-		}
-		om.releaseCoordinator(broker)
-		return om.fetchInitialOffset(topic, partition, retries-1)
-	case ErrOffsetsLoadInProgress:
-		if retries <= 0 {
-			return 0, "", block.Err
-		}
-		backoff := om.computeBackoff(retries)
-		select {
-		case <-om.closing:
-			return 0, "", block.Err
-		case <-time.After(backoff):
-		}
-		return om.fetchInitialOffset(topic, partition, retries-1)
-	default:
-		return 0, "", block.Err
-	}
-}
-
-func (om *offsetManager) coordinator() (*Broker, error) {
-	om.brokerLock.RLock()
-	broker := om.broker
-	om.brokerLock.RUnlock()
-
-	if broker != nil {
-		return broker, nil
-	}
-
-	om.brokerLock.Lock()
-	defer om.brokerLock.Unlock()
-
-	if broker := om.broker; broker != nil {
-		return broker, nil
-	}
-
-	if err := om.client.RefreshCoordinator(om.group); err != nil {
-		return nil, err
-	}
-
-	broker, err := om.client.Coordinator(om.group)
-	if err != nil {
-		return nil, err
-	}
-
-	om.broker = broker
-	return broker, nil
-}
-
-func (om *offsetManager) releaseCoordinator(b *Broker) {
-	om.brokerLock.Lock()
-	if om.broker == b {
-		om.broker = nil
-	}
-	om.brokerLock.Unlock()
-}
-
-func (om *offsetManager) mainLoop() {
-	defer om.ticker.Stop()
-	defer close(om.closed)
-
-	for {
-		select {
-		case <-om.ticker.C:
-			om.Commit()
-		case <-om.closing:
-			return
-		}
-	}
-}
-
-func (om *offsetManager) Commit() {
-	om.flushToBroker()
-	om.releasePOMs(false)
-}
-
-func (om *offsetManager) flushToBroker() {
-	req := om.constructRequest()
-	if req == nil {
-		return
-	}
-
-	broker, err := om.coordinator()
-	if err != nil {
-		om.handleError(err)
-		return
-	}
-
-	resp, err := broker.CommitOffset(req)
-	if err != nil {
-		om.handleError(err)
-		om.releaseCoordinator(broker)
-		_ = broker.Close()
-		return
-	}
-
-	om.handleResponse(broker, req, resp)
-}
-
-func (om *offsetManager) constructRequest() *OffsetCommitRequest {
-	var r *OffsetCommitRequest
-	var perPartitionTimestamp int64
-	if om.conf.Consumer.Offsets.Retention == 0 {
-		perPartitionTimestamp = ReceiveTime
-		r = &OffsetCommitRequest{
-			Version:                 1,
-			ConsumerGroup:           om.group,
-			ConsumerID:              om.memberID,
-			ConsumerGroupGeneration: om.generation,
-		}
-	} else {
-		r = &OffsetCommitRequest{
-			Version:                 2,
-			RetentionTime:           int64(om.conf.Consumer.Offsets.Retention / time.Millisecond),
-			ConsumerGroup:           om.group,
-			ConsumerID:              om.memberID,
-			ConsumerGroupGeneration: om.generation,
-		}
-	}
-
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	for _, topicManagers := range om.poms {
-		for _, pom := range topicManagers {
-			pom.lock.Lock()
-			if pom.dirty {
-				r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata)
-			}
-			pom.lock.Unlock()
-		}
-	}
-
-	if len(r.blocks) > 0 {
-		return r
-	}
-
-	return nil
-}
-
-func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	for _, topicManagers := range om.poms {
-		for _, pom := range topicManagers {
-			if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
-				continue
-			}
-
-			var err KError
-			var ok bool
-
-			if resp.Errors[pom.topic] == nil {
-				pom.handleError(ErrIncompleteResponse)
-				continue
-			}
-			if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
-				pom.handleError(ErrIncompleteResponse)
-				continue
-			}
-
-			switch err {
-			case ErrNoError:
-				block := req.blocks[pom.topic][pom.partition]
-				pom.updateCommitted(block.offset, block.metadata)
-			case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
-				ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
-				// not a critical error, we just need to redispatch
-				om.releaseCoordinator(broker)
-			case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
-				// nothing we can do about this, just tell the user and carry on
-				pom.handleError(err)
-			case ErrOffsetsLoadInProgress:
-				// nothing wrong but we didn't commit, we'll get it next time round
-			case ErrUnknownTopicOrPartition:
-				// let the user know *and* try redispatching - if topic-auto-create is
-				// enabled, redispatching should trigger a metadata req and create the
-				// topic; if not then re-dispatching won't help, but we've let the user
-				// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
-				fallthrough
-			default:
-				// dunno, tell the user and try redispatching
-				pom.handleError(err)
-				om.releaseCoordinator(broker)
-			}
-		}
-	}
-}
-
-func (om *offsetManager) handleError(err error) {
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	for _, topicManagers := range om.poms {
-		for _, pom := range topicManagers {
-			pom.handleError(err)
-		}
-	}
-}
-
-func (om *offsetManager) asyncClosePOMs() {
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	for _, topicManagers := range om.poms {
-		for _, pom := range topicManagers {
-			pom.AsyncClose()
-		}
-	}
-}
-
-// Releases/removes closed POMs once they are clean (or when forced)
-func (om *offsetManager) releasePOMs(force bool) (remaining int) {
-	om.pomsLock.Lock()
-	defer om.pomsLock.Unlock()
-
-	for topic, topicManagers := range om.poms {
-		for partition, pom := range topicManagers {
-			pom.lock.Lock()
-			releaseDue := pom.done && (force || !pom.dirty)
-			pom.lock.Unlock()
-
-			if releaseDue {
-				pom.release()
-
-				delete(om.poms[topic], partition)
-				if len(om.poms[topic]) == 0 {
-					delete(om.poms, topic)
-				}
-			}
-		}
-		remaining += len(om.poms[topic])
-	}
-	return
-}
-
-func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	if partitions, ok := om.poms[topic]; ok {
-		if pom, ok := partitions[partition]; ok {
-			return pom
-		}
-	}
-	return nil
-}
-
-// Partition Offset Manager
-
-// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
-// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes
-// out of scope.
-type PartitionOffsetManager interface {
-	// NextOffset returns the next offset that should be consumed for the managed
-	// partition, accompanied by metadata which can be used to reconstruct the state
-	// of the partition consumer when it resumes. NextOffset() will return
-	// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
-	// was committed for this partition yet.
-	NextOffset() (int64, string)
-
-	// MarkOffset marks the provided offset, alongside a metadata string
-	// that represents the state of the partition consumer at that point in time. The
-	// metadata string can be used by another consumer to restore that state, so it
-	// can resume consumption.
-	//
-	// To follow upstream conventions, you are expected to mark the offset of the
-	// next message to read, not the last message read. Thus, when calling `MarkOffset`
-	// you should typically add one to the offset of the last consumed message.
-	//
-	// Note: calling MarkOffset does not necessarily commit the offset to the backend
-	// store immediately for efficiency reasons, and it may never be committed if
-	// your application crashes. This means that you may end up processing the same
-	// message twice, and your processing should ideally be idempotent.
-	MarkOffset(offset int64, metadata string)
-
-	// ResetOffset resets to the provided offset, alongside a metadata string that
-	// represents the state of the partition consumer at that point in time. Reset
-	// acts as a counterpart to MarkOffset, the difference being that it allows to
-	// reset an offset to an earlier or smaller value, where MarkOffset only
-	// allows incrementing the offset. cf MarkOffset for more details.
-	ResetOffset(offset int64, metadata string)
-
-	// Errors returns a read channel of errors that occur during offset management, if
-	// enabled. By default, errors are logged and not returned over this channel. If
-	// you want to implement any custom error handling, set your config's
-	// Consumer.Return.Errors setting to true, and read from this channel.
-	Errors() <-chan *ConsumerError
-
-	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
-	// return immediately, after which you should wait until the 'errors' channel has
-	// been drained and closed. It is required to call this function (or Close) before
-	// a consumer object passes out of scope, as it will otherwise leak memory. You
-	// must call this before calling Close on the underlying client.
-	AsyncClose()
-
-	// Close stops the PartitionOffsetManager from managing offsets. It is required to
-	// call this function (or AsyncClose) before a PartitionOffsetManager object
-	// passes out of scope, as it will otherwise leak memory. You must call this
-	// before calling Close on the underlying client.
-	Close() error
-}
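The "mark the next offset, not the last one read" convention above is the part most often gotten wrong, so here is an illustrative consume loop pairing a PartitionConsumer with a PartitionOffsetManager (the Client, topic and partition are assumed to exist; error handling is abbreviated):

// Illustrative sketch: resume from the committed position and always mark the
// offset of the *next* message to read, per the MarkOffset contract above.
func consumeWithOffsets(client Client, pom PartitionOffsetManager) error {
	consumer, err := NewConsumerFromClient(client)
	if err != nil {
		return err
	}
	defer consumer.Close()

	startAt, _ := pom.NextOffset() // falls back to Consumer.Offsets.Initial if nothing was committed
	pc, err := consumer.ConsumePartition("my-topic", 0, startAt)
	if err != nil {
		return err
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		// ... process msg ...
		pom.MarkOffset(msg.Offset+1, "") // next offset to read, not the last one read
	}
	return nil
}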
-
-type partitionOffsetManager struct {
-	parent    *offsetManager
-	topic     string
-	partition int32
-
-	lock     sync.Mutex
-	offset   int64
-	metadata string
-	dirty    bool
-	done     bool
-
-	releaseOnce sync.Once
-	errors      chan *ConsumerError
-}
-
-func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
-	offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
-	if err != nil {
-		return nil, err
-	}
-
-	return &partitionOffsetManager{
-		parent:    om,
-		topic:     topic,
-		partition: partition,
-		errors:    make(chan *ConsumerError, om.conf.ChannelBufferSize),
-		offset:    offset,
-		metadata:  metadata,
-	}, nil
-}
-
-func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
-	return pom.errors
-}
-
-func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
-	pom.lock.Lock()
-	defer pom.lock.Unlock()
-
-	if offset > pom.offset {
-		pom.offset = offset
-		pom.metadata = metadata
-		pom.dirty = true
-	}
-}
-
-func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
-	pom.lock.Lock()
-	defer pom.lock.Unlock()
-
-	if offset <= pom.offset {
-		pom.offset = offset
-		pom.metadata = metadata
-		pom.dirty = true
-	}
-}
-
-func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
-	pom.lock.Lock()
-	defer pom.lock.Unlock()
-
-	if pom.offset == offset && pom.metadata == metadata {
-		pom.dirty = false
-	}
-}
-
-func (pom *partitionOffsetManager) NextOffset() (int64, string) {
-	pom.lock.Lock()
-	defer pom.lock.Unlock()
-
-	if pom.offset >= 0 {
-		return pom.offset, pom.metadata
-	}
-
-	return pom.parent.conf.Consumer.Offsets.Initial, ""
-}
-
-func (pom *partitionOffsetManager) AsyncClose() {
-	pom.lock.Lock()
-	pom.done = true
-	pom.lock.Unlock()
-}
-
-func (pom *partitionOffsetManager) Close() error {
-	pom.AsyncClose()
-
-	var errors ConsumerErrors
-	for err := range pom.errors {
-		errors = append(errors, err)
-	}
-
-	if len(errors) > 0 {
-		return errors
-	}
-	return nil
-}
-
-func (pom *partitionOffsetManager) handleError(err error) {
-	cErr := &ConsumerError{
-		Topic:     pom.topic,
-		Partition: pom.partition,
-		Err:       err,
-	}
-
-	if pom.parent.conf.Consumer.Return.Errors {
-		pom.errors <- cErr
-	} else {
-		Logger.Println(cErr)
-	}
-}
-
-func (pom *partitionOffsetManager) release() {
-	pom.releaseOnce.Do(func() {
-		close(pom.errors)
-	})
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
deleted file mode 100644
index 4c9ce4d..0000000
--- a/vendor/github.com/Shopify/sarama/offset_request.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package sarama
-
-type offsetRequestBlock struct {
-	time       int64
-	maxOffsets int32 // Only used in version 0
-}
-
-func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
-	pe.putInt64(b.time)
-	if version == 0 {
-		pe.putInt32(b.maxOffsets)
-	}
-
-	return nil
-}
-
-func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
-	if b.time, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if version == 0 {
-		if b.maxOffsets, err = pd.getInt32(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-type OffsetRequest struct {
-	Version        int16
-	IsolationLevel IsolationLevel
-	replicaID      int32
-	isReplicaIDSet bool
-	blocks         map[string]map[int32]*offsetRequestBlock
-}
-
-func (r *OffsetRequest) encode(pe packetEncoder) error {
-	if r.isReplicaIDSet {
-		pe.putInt32(r.replicaID)
-	} else {
-		// default replica ID is always -1 for clients
-		pe.putInt32(-1)
-	}
-
-	if r.Version >= 2 {
-		pe.putBool(r.IsolationLevel == ReadCommitted)
-	}
-
-	err := pe.putArrayLength(len(r.blocks))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range r.blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err = block.encode(pe, r.Version); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
-	r.Version = version
-
-	replicaID, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if replicaID >= 0 {
-		r.SetReplicaID(replicaID)
-	}
-
-	if r.Version >= 2 {
-		tmp, err := pd.getBool()
-		if err != nil {
-			return err
-		}
-
-		r.IsolationLevel = ReadUncommitted
-		if tmp {
-			r.IsolationLevel = ReadCommitted
-		}
-	}
-
-	blockCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if blockCount == 0 {
-		return nil
-	}
-	r.blocks = make(map[string]map[int32]*offsetRequestBlock)
-	for i := 0; i < blockCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			block := &offsetRequestBlock{}
-			if err := block.decode(pd, version); err != nil {
-				return err
-			}
-			r.blocks[topic][partition] = block
-		}
-	}
-	return nil
-}
-
-func (r *OffsetRequest) key() int16 {
-	return 2
-}
-
-func (r *OffsetRequest) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *OffsetRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_1_0
-	case 2:
-		return V0_11_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *OffsetRequest) SetReplicaID(id int32) {
-	r.replicaID = id
-	r.isReplicaIDSet = true
-}
-
-func (r *OffsetRequest) ReplicaID() int32 {
-	if r.isReplicaIDSet {
-		return r.replicaID
-	}
-	return -1
-}
-
-func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
-	}
-
-	tmp := new(offsetRequestBlock)
-	tmp.time = time
-	if r.Version == 0 {
-		tmp.maxOffsets = maxOffsets
-	}
-
-	r.blocks[topic][partitionID] = tmp
-}
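As a quick illustration of the request builder above (the topic name is a placeholder):

// Illustrative sketch: ask for the newest available offset of one partition.
// OffsetNewest (-1) and OffsetOldest (-2) are the sentinel "times" the broker
// understands; maxOffsets is only honoured by the legacy v0 format.
func newestOffsetRequest() *OffsetRequest {
	req := &OffsetRequest{Version: 1}
	req.AddBlock("my-topic", 0, OffsetNewest, 1)
	return req
}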
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
deleted file mode 100644
index 69349ef..0000000
--- a/vendor/github.com/Shopify/sarama/offset_response.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package sarama
-
-type OffsetResponseBlock struct {
-	Err       KError
-	Offsets   []int64 // Version 0
-	Offset    int64   // Version 1
-	Timestamp int64   // Version 1
-}
-
-func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	if version == 0 {
-		b.Offsets, err = pd.getInt64Array()
-
-		return err
-	}
-
-	b.Timestamp, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	b.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	// For backwards compatibility put the offset in the offsets array too
-	b.Offsets = []int64{b.Offset}
-
-	return nil
-}
-
-func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(b.Err))
-
-	if version == 0 {
-		return pe.putInt64Array(b.Offsets)
-	}
-
-	pe.putInt64(b.Timestamp)
-	pe.putInt64(b.Offset)
-
-	return nil
-}
-
-type OffsetResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	Blocks         map[string]map[int32]*OffsetResponseBlock
-}
-
-func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
-	if version >= 2 {
-		r.ThrottleTimeMs, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(OffsetResponseBlock)
-			err = block.decode(pd, version)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-/*
-// [0 0 0 1 ntopics
-0 8 109 121 95 116 111 112 105 99 topic
-0 0 0 1 npartitions
-0 0 0 0 id
-0 0
-
-0 0 0 1 0 0 0 0
-0 1 1 1 0 0 0 1
-0 8 109 121 95 116 111 112
-105 99 0 0 0 1 0 0
-0 0 0 0 0 0 0 1
-0 0 0 0 0 1 1 1] <nil>
-
-*/
-func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
-	if r.Version >= 2 {
-		pe.putInt32(r.ThrottleTimeMs)
-	}
-
-	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.Blocks {
-		if err = pe.putString(topic); err != nil {
-			return err
-		}
-		if err = pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err = block.encode(pe, r.version()); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetResponse) key() int16 {
-	return 2
-}
-
-func (r *OffsetResponse) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *OffsetResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_1_0
-	case 2:
-		return V0_11_0_0
-	default:
-		return MinVersion
-	}
-}
-
-// testing API
-
-func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
-	}
-	byTopic, ok := r.Blocks[topic]
-	if !ok {
-		byTopic = make(map[int32]*OffsetResponseBlock)
-		r.Blocks[topic] = byTopic
-	}
-	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
-}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
deleted file mode 100644
index 184bc26..0000000
--- a/vendor/github.com/Shopify/sarama/packet_decoder.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package sarama
-
-// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
-// Types implementing Decoder only need to worry about calling methods like GetString,
-// not about how a string is represented in Kafka.
-type packetDecoder interface {
-	// Primitives
-	getInt8() (int8, error)
-	getInt16() (int16, error)
-	getInt32() (int32, error)
-	getInt64() (int64, error)
-	getVarint() (int64, error)
-	getUVarint() (uint64, error)
-	getArrayLength() (int, error)
-	getCompactArrayLength() (int, error)
-	getBool() (bool, error)
-	getEmptyTaggedFieldArray() (int, error)
-
-	// Collections
-	getBytes() ([]byte, error)
-	getVarintBytes() ([]byte, error)
-	getCompactBytes() ([]byte, error)
-	getRawBytes(length int) ([]byte, error)
-	getString() (string, error)
-	getNullableString() (*string, error)
-	getCompactString() (string, error)
-	getCompactNullableString() (*string, error)
-	getCompactInt32Array() ([]int32, error)
-	getInt32Array() ([]int32, error)
-	getInt64Array() ([]int64, error)
-	getStringArray() ([]string, error)
-
-	// Subsets
-	remaining() int
-	getSubset(length int) (packetDecoder, error)
-	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
-	peekInt8(offset int) (int8, error)              // similar to peek, but just one byte
-
-	// Stacks, see PushDecoder
-	push(in pushDecoder) error
-	pop() error
-}
-
-// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
-// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
-// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
-// depend upon have been decoded.
-type pushDecoder interface {
-	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
-	saveOffset(in int)
-
-	// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
-	reserveLength() int
-
-	// Indicates that all required data is now available to calculate and check the field.
-	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
-	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
-	check(curOffset int, buf []byte) error
-}
-
-// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
-// field itself is unknown until its value has been decoded (for instance varint encoded length
-// fields).
-// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength()
-type dynamicPushDecoder interface {
-	pushDecoder
-	decoder
-}
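To make the push/pop contract concrete, a hypothetical pushDecoder that verifies a 4-byte big-endian CRC32 written ahead of the bytes it covers. This is only a sketch modelled on the comments above (it assumes encoding/binary, hash/crc32 and fmt are imported; it is not the package's actual CRC field type):

// Hypothetical pushDecoder: push() reserves 4 bytes at the saved offset; once
// everything the checksum covers has been decoded, pop() invokes check(),
// which recomputes the CRC over the covered range and compares it.
type illustrativeCRCCheck struct {
	savedOffset int
}

func (c *illustrativeCRCCheck) saveOffset(in int)  { c.savedOffset = in }
func (c *illustrativeCRCCheck) reserveLength() int { return 4 }

func (c *illustrativeCRCCheck) check(curOffset int, buf []byte) error {
	expected := binary.BigEndian.Uint32(buf[c.savedOffset:])
	actual := crc32.Checksum(buf[c.savedOffset+4:curOffset], crc32.MakeTable(crc32.Castagnoli))
	if expected != actual {
		return fmt.Errorf("crc mismatch: want %08x, got %08x", expected, actual)
	}
	return nil
}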
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
deleted file mode 100644
index aea53ca..0000000
--- a/vendor/github.com/Shopify/sarama/packet_encoder.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package sarama
-
-import "github.com/rcrowley/go-metrics"
-
-// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
-// Types implementing Encoder only need to worry about calling methods like PutString,
-// not about how a string is represented in Kafka.
-type packetEncoder interface {
-	// Primitives
-	putInt8(in int8)
-	putInt16(in int16)
-	putInt32(in int32)
-	putInt64(in int64)
-	putVarint(in int64)
-	putUVarint(in uint64)
-	putCompactArrayLength(in int)
-	putArrayLength(in int) error
-	putBool(in bool)
-
-	// Collections
-	putBytes(in []byte) error
-	putVarintBytes(in []byte) error
-	putCompactBytes(in []byte) error
-	putRawBytes(in []byte) error
-	putCompactString(in string) error
-	putNullableCompactString(in *string) error
-	putString(in string) error
-	putNullableString(in *string) error
-	putStringArray(in []string) error
-	putCompactInt32Array(in []int32) error
-	putNullableCompactInt32Array(in []int32) error
-	putInt32Array(in []int32) error
-	putInt64Array(in []int64) error
-	putEmptyTaggedFieldArray()
-
-	// Provide the current offset to record the batch size metric
-	offset() int
-
-	// Stacks, see PushEncoder
-	push(in pushEncoder)
-	pop() error
-
-	// To record metrics when provided
-	metricRegistry() metrics.Registry
-}
-
-// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
-// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
-// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
-// depend upon have been written.
-type pushEncoder interface {
-	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
-	saveOffset(in int)
-
-	// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
-	reserveLength() int
-
-	// Indicates that all required data is now available to calculate and write the field.
-	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
-	// of data to the saved offset, based on the data between the saved offset and curOffset.
-	run(curOffset int, buf []byte) error
-}
-
-// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
-// field itself is unknown until its value has been computed (for instance varint encoded length
-// fields).
-type dynamicPushEncoder interface {
-	pushEncoder
-
-	// Called during pop() to adjust the length of the field.
-	// It should return the difference in bytes between the last computed length and current length.
-	adjustLength(currOffset int) int
-}
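The encoding side is symmetric; a hypothetical pushEncoder that back-fills a 4-byte big-endian length over everything written after it (a sketch only, assuming encoding/binary is imported; the package's real length field may differ):

// Hypothetical pushEncoder: reserveLength() leaves 4 bytes at the saved
// offset, and run() later writes the number of bytes encoded between that
// reservation and the current offset.
type illustrativeLengthField struct {
	startOffset int
}

func (l *illustrativeLengthField) saveOffset(in int)  { l.startOffset = in }
func (l *illustrativeLengthField) reserveLength() int { return 4 }

func (l *illustrativeLengthField) run(curOffset int, buf []byte) error {
	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
	return nil
}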
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
deleted file mode 100644
index a66e11e..0000000
--- a/vendor/github.com/Shopify/sarama/partitioner.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package sarama
-
-import (
-	"hash"
-	"hash/fnv"
-	"math/rand"
-	"time"
-)
-
-// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
-// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
-// as simple default implementations.
-type Partitioner interface {
-	// Partition takes a message and partition count and chooses a partition
-	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
-
-	// RequiresConsistency indicates to the user of the partitioner whether the
-	// mapping of key->partition is consistent or not. Specifically, if a
-	// partitioner requires consistency then it must be allowed to choose from all
-	// partitions (even ones known to be unavailable), and its choice must be
-	// respected by the caller. The obvious example is the HashPartitioner.
-	RequiresConsistency() bool
-}
-
-// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
-// in order to allow more flexibility than is originally allowed by the
-// RequiresConsistency method in the Partitioner interface. This allows
-// partitioners to require consistency sometimes, but not all times. It's useful
-// for, e.g., the HashPartitioner, which does not require consistency if the
-// message key is nil.
-type DynamicConsistencyPartitioner interface {
-	Partitioner
-
-	// MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
-	// but takes in the message being partitioned so that the partitioner can
-	// make a per-message determination.
-	MessageRequiresConsistency(message *ProducerMessage) bool
-}
-
-// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
-type PartitionerConstructor func(topic string) Partitioner
-
-type manualPartitioner struct{}
-
-// HashPartitionerOption lets you modify default values of the partitioner
-type HashPartitionerOption func(*hashPartitioner)
-
-// WithAbsFirst means that the partitioner handles absolute values
-// in the same way as the reference Java implementation
-func WithAbsFirst() HashPartitionerOption {
-	return func(hp *hashPartitioner) {
-		hp.referenceAbs = true
-	}
-}
-
-// WithCustomHashFunction lets you specify what hash function to use for the partitioning
-func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
-	return func(hp *hashPartitioner) {
-		hp.hasher = hasher()
-	}
-}
-
-// WithCustomFallbackPartitioner lets you specify which hash partitioner should be used when the distribution key is empty
-func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
-	return func(hp *hashPartitioner) {
-		hp.random = randomHP
-	}
-}
-
-// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
-// ProducerMessage's Partition field as the partition to produce to.
-func NewManualPartitioner(topic string) Partitioner {
-	return new(manualPartitioner)
-}
-
-func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
-	return message.Partition, nil
-}
-
-func (p *manualPartitioner) RequiresConsistency() bool {
-	return true
-}
-
-type randomPartitioner struct {
-	generator *rand.Rand
-}
-
-// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
-func NewRandomPartitioner(topic string) Partitioner {
-	p := new(randomPartitioner)
-	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
-	return p
-}
-
-func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
-	return int32(p.generator.Intn(int(numPartitions))), nil
-}
-
-func (p *randomPartitioner) RequiresConsistency() bool {
-	return false
-}
-
-type roundRobinPartitioner struct {
-	partition int32
-}
-
-// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
-func NewRoundRobinPartitioner(topic string) Partitioner {
-	return &roundRobinPartitioner{}
-}
-
-func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
-	if p.partition >= numPartitions {
-		p.partition = 0
-	}
-	ret := p.partition
-	p.partition++
-	return ret, nil
-}
-
-func (p *roundRobinPartitioner) RequiresConsistency() bool {
-	return false
-}
-
-type hashPartitioner struct {
-	random       Partitioner
-	hasher       hash.Hash32
-	referenceAbs bool
-}
-
-// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
-// The argument is a function providing the instance, implementing the hash.Hash32 interface. This ensures that
-// each partition dispatcher gets its own hasher, avoiding the concurrency issues that come from sharing an instance.
-func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
-	return func(topic string) Partitioner {
-		p := new(hashPartitioner)
-		p.random = NewRandomPartitioner(topic)
-		p.hasher = hasher()
-		p.referenceAbs = false
-		return p
-	}
-}
-
-// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
-func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
-	return func(topic string) Partitioner {
-		p := new(hashPartitioner)
-		p.random = NewRandomPartitioner(topic)
-		p.hasher = fnv.New32a()
-		p.referenceAbs = false
-		for _, option := range options {
-			option(p)
-		}
-		return p
-	}
-}
-
-// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
-// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
-// modulus the number of partitions. This ensures that messages with the same key always end up on the
-// same partition.
-func NewHashPartitioner(topic string) Partitioner {
-	p := new(hashPartitioner)
-	p.random = NewRandomPartitioner(topic)
-	p.hasher = fnv.New32a()
-	p.referenceAbs = false
-	return p
-}
-
-// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
-// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
-// that but it had a mistake and now there are people depending on both behaviours. This will
-// all go away on the next major version bump.
-func NewReferenceHashPartitioner(topic string) Partitioner {
-	p := new(hashPartitioner)
-	p.random = NewRandomPartitioner(topic)
-	p.hasher = fnv.New32a()
-	p.referenceAbs = true
-	return p
-}
-
-func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
-	if message.Key == nil {
-		return p.random.Partition(message, numPartitions)
-	}
-	bytes, err := message.Key.Encode()
-	if err != nil {
-		return -1, err
-	}
-	p.hasher.Reset()
-	_, err = p.hasher.Write(bytes)
-	if err != nil {
-		return -1, err
-	}
-	var partition int32
-	// Turns out we were doing our absolute value in a subtly different way from the upstream
-	// implementation, but now we need to maintain backwards compat for people who started using
-	// the old version; if referenceAbs is set we are compatible with the reference java client
-	// but not past Sarama versions
-	if p.referenceAbs {
-		partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
-	} else {
-		partition = int32(p.hasher.Sum32()) % numPartitions
-		if partition < 0 {
-			partition = -partition
-		}
-	}
-	return partition, nil
-}
-
-func (p *hashPartitioner) RequiresConsistency() bool {
-	return true
-}
-
-func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
-	return message.Key != nil
-}
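
As a usage note, applications normally pick one of these constructors through the producer configuration rather than calling them directly. A minimal sketch, assuming a broker at localhost:9092 and the standard sarama producer API (the import path follows this change's move to github.com/IBM/sarama; the deleted copy shown here lived under github.com/Shopify/sarama):

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Same key -> same partition via the FNV-1a HashPartitioner; nil keys fall
	// back to a random partition, as described above.
	cfg.Producer.Partitioner = sarama.NewHashPartitioner
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Return.Successes = true // required by the sync producer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Key:   sarama.StringEncoder("user-42"),
		Value: sarama.StringEncoder("payload"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored on partition %d at offset %d", partition, offset)
}
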
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
deleted file mode 100644
index 0d01374..0000000
--- a/vendor/github.com/Shopify/sarama/prep_encoder.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-type prepEncoder struct {
-	stack  []pushEncoder
-	length int
-}
-
-// primitives
-
-func (pe *prepEncoder) putInt8(in int8) {
-	pe.length++
-}
-
-func (pe *prepEncoder) putInt16(in int16) {
-	pe.length += 2
-}
-
-func (pe *prepEncoder) putInt32(in int32) {
-	pe.length += 4
-}
-
-func (pe *prepEncoder) putInt64(in int64) {
-	pe.length += 8
-}
-
-func (pe *prepEncoder) putVarint(in int64) {
-	var buf [binary.MaxVarintLen64]byte
-	pe.length += binary.PutVarint(buf[:], in)
-}
-
-func (pe *prepEncoder) putUVarint(in uint64) {
-	var buf [binary.MaxVarintLen64]byte
-	pe.length += binary.PutUvarint(buf[:], in)
-}
-
-func (pe *prepEncoder) putArrayLength(in int) error {
-	if in > math.MaxInt32 {
-		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
-	}
-	pe.length += 4
-	return nil
-}
-
-func (pe *prepEncoder) putCompactArrayLength(in int) {
-	pe.putUVarint(uint64(in + 1))
-}
-
-func (pe *prepEncoder) putBool(in bool) {
-	pe.length++
-}
-
-// arrays
-
-func (pe *prepEncoder) putBytes(in []byte) error {
-	pe.length += 4
-	if in == nil {
-		return nil
-	}
-	return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putVarintBytes(in []byte) error {
-	if in == nil {
-		pe.putVarint(-1)
-		return nil
-	}
-	pe.putVarint(int64(len(in)))
-	return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putCompactBytes(in []byte) error {
-	pe.putUVarint(uint64(len(in) + 1))
-	return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putCompactString(in string) error {
-	pe.putCompactArrayLength(len(in))
-	return pe.putRawBytes([]byte(in))
-}
-
-func (pe *prepEncoder) putNullableCompactString(in *string) error {
-	if in == nil {
-		pe.putUVarint(0)
-		return nil
-	} else {
-		return pe.putCompactString(*in)
-	}
-}
-
-func (pe *prepEncoder) putRawBytes(in []byte) error {
-	if len(in) > math.MaxInt32 {
-		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
-	}
-	pe.length += len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putNullableString(in *string) error {
-	if in == nil {
-		pe.length += 2
-		return nil
-	}
-	return pe.putString(*in)
-}
-
-func (pe *prepEncoder) putString(in string) error {
-	pe.length += 2
-	if len(in) > math.MaxInt16 {
-		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
-	}
-	pe.length += len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putStringArray(in []string) error {
-	err := pe.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-
-	for _, str := range in {
-		if err := pe.putString(str); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (pe *prepEncoder) putCompactInt32Array(in []int32) error {
-	if in == nil {
-		return errors.New("expected int32 array to be non null")
-	}
-
-	pe.putUVarint(uint64(len(in)) + 1)
-	pe.length += 4 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error {
-	if in == nil {
-		pe.putUVarint(0)
-		return nil
-	}
-
-	pe.putUVarint(uint64(len(in)) + 1)
-	pe.length += 4 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putInt32Array(in []int32) error {
-	err := pe.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	pe.length += 4 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putInt64Array(in []int64) error {
-	err := pe.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	pe.length += 8 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putEmptyTaggedFieldArray() {
-	pe.putUVarint(0)
-}
-
-func (pe *prepEncoder) offset() int {
-	return pe.length
-}
-
-// stackable
-
-func (pe *prepEncoder) push(in pushEncoder) {
-	in.saveOffset(pe.length)
-	pe.length += in.reserveLength()
-	pe.stack = append(pe.stack, in)
-}
-
-func (pe *prepEncoder) pop() error {
-	in := pe.stack[len(pe.stack)-1]
-	pe.stack = pe.stack[:len(pe.stack)-1]
-	if dpe, ok := in.(dynamicPushEncoder); ok {
-		pe.length += dpe.adjustLength(pe.length)
-	}
-
-	return nil
-}
-
-// we do not record metrics during the prep encoder pass
-func (pe *prepEncoder) metricRegistry() metrics.Registry {
-	return nil
-}
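
The prep encoder is the sizing half of sarama's two-pass encoding: it writes nothing and only accumulates how many bytes the real encoder will need, so a single exactly-sized buffer can be allocated. A toy sketch of the same idea (hypothetical helper, not the library's actual encode path):

package main

import "fmt"

// sizeMessage mirrors the prep pass above for a couple of fields: a string is
// a 2-byte length prefix plus its bytes, an int32 is a flat 4 bytes.
func sizeMessage(key, value string) int {
	size := 0
	size += 2 + len(key)   // putString(key)
	size += 2 + len(value) // putString(value)
	size += 4              // putInt32, e.g. a partition id
	return size
}

func main() {
	n := sizeMessage("user-42", "payload")
	buf := make([]byte, n) // the real encoder would now fill this exactly
	fmt.Println(len(buf))  // 22
}
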
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
deleted file mode 100644
index 0034651..0000000
--- a/vendor/github.com/Shopify/sarama/produce_request.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package sarama
-
-import "github.com/rcrowley/go-metrics"
-
-// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
-// it must see before responding. Any of the constants defined here are valid. On broker versions
-// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
-// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
-// by setting the `min.insync.replicas` value in the broker's configuration).
-type RequiredAcks int16
-
-const (
-	// NoResponse doesn't send any response, the TCP ACK is all you get.
-	NoResponse RequiredAcks = 0
-	// WaitForLocal waits for only the local commit to succeed before responding.
-	WaitForLocal RequiredAcks = 1
-	// WaitForAll waits for all in-sync replicas to commit before responding.
-	// The minimum number of in-sync replicas is configured on the broker via
-	// the `min.insync.replicas` configuration key.
-	WaitForAll RequiredAcks = -1
-)
-
-type ProduceRequest struct {
-	TransactionalID *string
-	RequiredAcks    RequiredAcks
-	Timeout         int32
-	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
-	records         map[string]map[int32]Records
-}
-
-func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
-	topicCompressionRatioMetric metrics.Histogram) int64 {
-	var topicRecordCount int64
-	for _, messageBlock := range msgSet.Messages {
-		// Is this a fake "message" wrapping real messages?
-		if messageBlock.Msg.Set != nil {
-			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
-		} else {
-			// A single uncompressed message
-			topicRecordCount++
-		}
-		// Better safe than sorry when computing the compression ratio
-		if messageBlock.Msg.compressedSize != 0 {
-			compressionRatio := float64(len(messageBlock.Msg.Value)) /
-				float64(messageBlock.Msg.compressedSize)
-			// Histograms do not support decimal values, so multiply by 100 for better precision
-			intCompressionRatio := int64(100 * compressionRatio)
-			compressionRatioMetric.Update(intCompressionRatio)
-			topicCompressionRatioMetric.Update(intCompressionRatio)
-		}
-	}
-	return topicRecordCount
-}
-
-func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
-	topicCompressionRatioMetric metrics.Histogram) int64 {
-	if recordBatch.compressedRecords != nil {
-		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
-		compressionRatioMetric.Update(compressionRatio)
-		topicCompressionRatioMetric.Update(compressionRatio)
-	}
-
-	return int64(len(recordBatch.Records))
-}
-
-func (r *ProduceRequest) encode(pe packetEncoder) error {
-	if r.Version >= 3 {
-		if err := pe.putNullableString(r.TransactionalID); err != nil {
-			return err
-		}
-	}
-	pe.putInt16(int16(r.RequiredAcks))
-	pe.putInt32(r.Timeout)
-	metricRegistry := pe.metricRegistry()
-	var batchSizeMetric metrics.Histogram
-	var compressionRatioMetric metrics.Histogram
-	if metricRegistry != nil {
-		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
-		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
-	}
-	totalRecordCount := int64(0)
-
-	err := pe.putArrayLength(len(r.records))
-	if err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.records {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		topicRecordCount := int64(0)
-		var topicCompressionRatioMetric metrics.Histogram
-		if metricRegistry != nil {
-			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
-		}
-		for id, records := range partitions {
-			startOffset := pe.offset()
-			pe.putInt32(id)
-			pe.push(&lengthField{})
-			err = records.encode(pe)
-			if err != nil {
-				return err
-			}
-			err = pe.pop()
-			if err != nil {
-				return err
-			}
-			if metricRegistry != nil {
-				if r.Version >= 3 {
-					topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
-				} else {
-					topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
-				}
-				batchSize := int64(pe.offset() - startOffset)
-				batchSizeMetric.Update(batchSize)
-				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
-			}
-		}
-		if topicRecordCount > 0 {
-			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
-			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
-			totalRecordCount += topicRecordCount
-		}
-	}
-	if totalRecordCount > 0 {
-		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
-		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
-	}
-
-	return nil
-}
-
-func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
-	r.Version = version
-
-	if version >= 3 {
-		id, err := pd.getNullableString()
-		if err != nil {
-			return err
-		}
-		r.TransactionalID = id
-	}
-	requiredAcks, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.RequiredAcks = RequiredAcks(requiredAcks)
-	if r.Timeout, err = pd.getInt32(); err != nil {
-		return err
-	}
-	topicCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount == 0 {
-		return nil
-	}
-
-	r.records = make(map[string]map[int32]Records)
-	for i := 0; i < topicCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.records[topic] = make(map[int32]Records)
-
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			size, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			recordsDecoder, err := pd.getSubset(int(size))
-			if err != nil {
-				return err
-			}
-			var records Records
-			if err := records.decode(recordsDecoder); err != nil {
-				return err
-			}
-			r.records[topic][partition] = records
-		}
-	}
-
-	return nil
-}
-
-func (r *ProduceRequest) key() int16 {
-	return 0
-}
-
-func (r *ProduceRequest) version() int16 {
-	return r.Version
-}
-
-func (r *ProduceRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *ProduceRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_9_0_0
-	case 2:
-		return V0_10_0_0
-	case 3:
-		return V0_11_0_0
-	case 7:
-		return V2_1_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
-	if r.records == nil {
-		r.records = make(map[string]map[int32]Records)
-	}
-
-	if r.records[topic] == nil {
-		r.records[topic] = make(map[int32]Records)
-	}
-}
-
-func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
-	r.ensureRecords(topic, partition)
-	set := r.records[topic][partition].MsgSet
-
-	if set == nil {
-		set = new(MessageSet)
-		r.records[topic][partition] = newLegacyRecords(set)
-	}
-
-	set.addMessage(msg)
-}
-
-func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
-	r.ensureRecords(topic, partition)
-	r.records[topic][partition] = newLegacyRecords(set)
-}
-
-func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
-	r.ensureRecords(topic, partition)
-	r.records[topic][partition] = newDefaultRecords(batch)
-}
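
A produce request can also be assembled by hand with the helpers above; normally the producer's produceSet does this. A minimal sketch using only the API visible in this file, with illustrative topic and timeout values:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// A legacy (pre-0.11) produce request built by hand.
	req := &sarama.ProduceRequest{
		RequiredAcks: sarama.WaitForAll, // wait for all in-sync replicas
		Timeout:      10000,             // broker-side timeout in milliseconds
	}
	req.AddMessage("example-topic", 0, &sarama.Message{
		Codec: sarama.CompressionNone,
		Key:   []byte("user-42"),
		Value: []byte("payload"),
	})
	fmt.Printf("%+v\n", req)
}
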
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
deleted file mode 100644
index edf9787..0000000
--- a/vendor/github.com/Shopify/sarama/produce_response.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-// Protocol, http://kafka.apache.org/protocol.html
-// v1
-// v2 = v3 = v4
-// v5 = v6 = v7
-// Produce Response (Version: 7) => [responses] throttle_time_ms
-//   responses => topic [partition_responses]
-//     topic => STRING
-//     partition_responses => partition error_code base_offset log_append_time log_start_offset
-//       partition => INT32
-//       error_code => INT16
-//       base_offset => INT64
-//       log_append_time => INT64
-//       log_start_offset => INT64
-//   throttle_time_ms => INT32
-
-// partition_responses in protocol
-type ProduceResponseBlock struct {
-	Err         KError    // v0, error_code
-	Offset      int64     // v0, base_offset
-	Timestamp   time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime`
-	StartOffset int64     // v5, log_start_offset
-}
-
-func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	b.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	if version >= 2 {
-		if millis, err := pd.getInt64(); err != nil {
-			return err
-		} else if millis != -1 {
-			b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
-		}
-	}
-
-	if version >= 5 {
-		b.StartOffset, err = pd.getInt64()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(b.Err))
-	pe.putInt64(b.Offset)
-
-	if version >= 2 {
-		timestamp := int64(-1)
-		if !b.Timestamp.Before(time.Unix(0, 0)) {
-			timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
-		} else if !b.Timestamp.IsZero() {
-			return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
-		}
-		pe.putInt64(timestamp)
-	}
-
-	if version >= 5 {
-		pe.putInt64(b.StartOffset)
-	}
-
-	return nil
-}
-
-type ProduceResponse struct {
-	Blocks       map[string]map[int32]*ProduceResponseBlock // v0, responses
-	Version      int16
-	ThrottleTime time.Duration // v1, throttle_time_ms
-}
-
-func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(ProduceResponseBlock)
-			err = block.decode(pd, version)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	if r.Version >= 1 {
-		millis, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-
-		r.ThrottleTime = time.Duration(millis) * time.Millisecond
-	}
-
-	return nil
-}
-
-func (r *ProduceResponse) encode(pe packetEncoder) error {
-	err := pe.putArrayLength(len(r.Blocks))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range r.Blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		for id, prb := range partitions {
-			pe.putInt32(id)
-			err = prb.encode(pe, r.Version)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	if r.Version >= 1 {
-		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-	}
-	return nil
-}
-
-func (r *ProduceResponse) key() int16 {
-	return 0
-}
-
-func (r *ProduceResponse) version() int16 {
-	return r.Version
-}
-
-func (r *ProduceResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *ProduceResponse) requiredVersion() KafkaVersion {
-	return MinVersion
-}
-
-func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-// Testing API
-
-func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
-	}
-	byTopic, ok := r.Blocks[topic]
-	if !ok {
-		byTopic = make(map[int32]*ProduceResponseBlock)
-		r.Blocks[topic] = byTopic
-	}
-	block := &ProduceResponseBlock{
-		Err: err,
-	}
-	if r.Version >= 2 {
-		block.Timestamp = time.Now()
-	}
-	byTopic[partition] = block
-}
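
AddTopicPartition is the testing API mentioned above; together with GetBlock it makes it straightforward to fabricate and inspect responses, e.g. in a mock broker. A short sketch:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// Fabricate a response the way a mock broker or test would, then read it
	// back the way the producer does.
	resp := &sarama.ProduceResponse{Version: 2}
	resp.AddTopicPartition("example-topic", 0, sarama.ErrNoError)

	block := resp.GetBlock("example-topic", 0)
	if block == nil {
		fmt.Println("no block for that topic/partition")
		return
	}
	fmt.Println(block.Err, block.Offset, block.Timestamp)
}
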
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
deleted file mode 100644
index 9c70f81..0000000
--- a/vendor/github.com/Shopify/sarama/produce_set.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-	"time"
-)
-
-type partitionSet struct {
-	msgs          []*ProducerMessage
-	recordsToSend Records
-	bufferBytes   int
-}
-
-type produceSet struct {
-	parent        *asyncProducer
-	msgs          map[string]map[int32]*partitionSet
-	producerID    int64
-	producerEpoch int16
-
-	bufferBytes int
-	bufferCount int
-}
-
-func newProduceSet(parent *asyncProducer) *produceSet {
-	pid, epoch := parent.txnmgr.getProducerID()
-	return &produceSet{
-		msgs:          make(map[string]map[int32]*partitionSet),
-		parent:        parent,
-		producerID:    pid,
-		producerEpoch: epoch,
-	}
-}
-
-func (ps *produceSet) add(msg *ProducerMessage) error {
-	var err error
-	var key, val []byte
-
-	if msg.Key != nil {
-		if key, err = msg.Key.Encode(); err != nil {
-			return err
-		}
-	}
-
-	if msg.Value != nil {
-		if val, err = msg.Value.Encode(); err != nil {
-			return err
-		}
-	}
-
-	timestamp := msg.Timestamp
-	if timestamp.IsZero() {
-		timestamp = time.Now()
-	}
-	timestamp = timestamp.Truncate(time.Millisecond)
-
-	partitions := ps.msgs[msg.Topic]
-	if partitions == nil {
-		partitions = make(map[int32]*partitionSet)
-		ps.msgs[msg.Topic] = partitions
-	}
-
-	var size int
-
-	set := partitions[msg.Partition]
-	if set == nil {
-		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-			batch := &RecordBatch{
-				FirstTimestamp:   timestamp,
-				Version:          2,
-				Codec:            ps.parent.conf.Producer.Compression,
-				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
-				ProducerID:       ps.producerID,
-				ProducerEpoch:    ps.producerEpoch,
-			}
-			if ps.parent.conf.Producer.Idempotent {
-				batch.FirstSequence = msg.sequenceNumber
-			}
-			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
-			size = recordBatchOverhead
-		} else {
-			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
-		}
-		partitions[msg.Partition] = set
-	}
-
-	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
-			return errors.New("assertion failed: message out of sequence added to a batch")
-		}
-	}
-
-	// Past this point we can't return an error, because we've already added the message to the set.
-	set.msgs = append(set.msgs, msg)
-
-	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-		// We are being conservative here to avoid having to prep encode the record
-		size += maximumRecordOverhead
-		rec := &Record{
-			Key:            key,
-			Value:          val,
-			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
-		}
-		size += len(key) + len(val)
-		if len(msg.Headers) > 0 {
-			rec.Headers = make([]*RecordHeader, len(msg.Headers))
-			for i := range msg.Headers {
-				rec.Headers[i] = &msg.Headers[i]
-				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
-			}
-		}
-		set.recordsToSend.RecordBatch.addRecord(rec)
-	} else {
-		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
-		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
-			msgToSend.Timestamp = timestamp
-			msgToSend.Version = 1
-		}
-		set.recordsToSend.MsgSet.addMessage(msgToSend)
-		size = producerMessageOverhead + len(key) + len(val)
-	}
-
-	set.bufferBytes += size
-	ps.bufferBytes += size
-	ps.bufferCount++
-
-	return nil
-}
-
-func (ps *produceSet) buildRequest() *ProduceRequest {
-	req := &ProduceRequest{
-		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
-		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
-	}
-	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
-		req.Version = 2
-	}
-	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-		req.Version = 3
-	}
-
-	if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
-		req.Version = 7
-	}
-
-	for topic, partitionSets := range ps.msgs {
-		for partition, set := range partitionSets {
-			if req.Version >= 3 {
-				// If the API version we're hitting is 3 or greater, we need to calculate
-				// offsets for each record in the batch relative to FirstOffset.
-				// Additionally, we must set LastOffsetDelta to the value of the last offset
-				// in the batch. Since the OffsetDelta of the first record is 0, we know that the
-				// final record of any batch will have an offset of (# of records in batch) - 1.
-				// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
-				//  under the RecordBatch section for details.)
-				rb := set.recordsToSend.RecordBatch
-				if len(rb.Records) > 0 {
-					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
-					for i, record := range rb.Records {
-						record.OffsetDelta = int64(i)
-					}
-				}
-				req.AddBatch(topic, partition, rb)
-				continue
-			}
-			if ps.parent.conf.Producer.Compression == CompressionNone {
-				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
-			} else {
-				// When compression is enabled, the entire set for each partition is compressed
-				// and sent as the payload of a single fake "message" with the appropriate codec
-				// set and no key. When the server sees a message with a compression codec, it
-				// decompresses the payload and treats the result as its message set.
-
-				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
-					// If our version is 0.10 or later, assign relative offsets
-					// to the inner messages. This lets the broker avoid
-					// recompressing the message set.
-					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
-					// for details on relative offsets.)
-					for i, msg := range set.recordsToSend.MsgSet.Messages {
-						msg.Offset = int64(i)
-					}
-				}
-				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
-				if err != nil {
-					Logger.Println(err) // if this happens, it's basically our fault.
-					panic(err)
-				}
-				compMsg := &Message{
-					Codec:            ps.parent.conf.Producer.Compression,
-					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
-					Key:              nil,
-					Value:            payload,
-					Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
-				}
-				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
-					compMsg.Version = 1
-					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
-				}
-				req.AddMessage(topic, partition, compMsg)
-			}
-		}
-	}
-
-	return req
-}
-
-func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
-	for topic, partitionSet := range ps.msgs {
-		for partition, set := range partitionSet {
-			cb(topic, partition, set)
-		}
-	}
-}
-
-func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
-	if ps.msgs[topic] == nil {
-		return nil
-	}
-	set := ps.msgs[topic][partition]
-	if set == nil {
-		return nil
-	}
-	ps.bufferBytes -= set.bufferBytes
-	ps.bufferCount -= len(set.msgs)
-	delete(ps.msgs[topic], partition)
-	return set.msgs
-}
-
-func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
-	version := 1
-	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-		version = 2
-	}
-
-	switch {
-	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
-	case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
-		return true
-	// Would we overflow the size-limit of a message-batch for this partition?
-	case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
-		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
-		return true
-	// Would we overflow simply in number of messages?
-	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
-		return true
-	default:
-		return false
-	}
-}
-
-func (ps *produceSet) readyToFlush() bool {
-	switch {
-	// If we don't have any messages, nothing else matters
-	case ps.empty():
-		return false
-	// If all three config values are 0, we always flush as-fast-as-possible
-	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
-		return true
-	// If we've passed the message trigger-point
-	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
-		return true
-	// If we've passed the byte trigger-point
-	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
-		return true
-	default:
-		return false
-	}
-}
-
-func (ps *produceSet) empty() bool {
-	return ps.bufferCount == 0
-}
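
wouldOverflow and readyToFlush are driven entirely by the producer's Flush and MaxMessageBytes settings referenced above. A sketch of tuning them (field names as used in this file; the Frequency timer itself lives elsewhere in the async producer):

package main

import (
	"fmt"
	"time"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Flush once 500 messages or 64 KiB are buffered, whichever comes first,
	// and at least every 100ms regardless.
	cfg.Producer.Flush.Messages = 500
	cfg.Producer.Flush.Bytes = 64 * 1024
	cfg.Producer.Flush.Frequency = 100 * time.Millisecond
	// Per topic/partition cap consulted by wouldOverflow above.
	cfg.Producer.MaxMessageBytes = 1000000

	fmt.Println(cfg.Validate()) // <nil> if the combination is acceptable
}
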
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
deleted file mode 100644
index 2482c63..0000000
--- a/vendor/github.com/Shopify/sarama/real_decoder.go
+++ /dev/null
@@ -1,437 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"math"
-)
-
-var (
-	errInvalidArrayLength      = PacketDecodingError{"invalid array length"}
-	errInvalidByteSliceLength  = PacketDecodingError{"invalid byteslice length"}
-	errInvalidStringLength     = PacketDecodingError{"invalid string length"}
-	errVarintOverflow          = PacketDecodingError{"varint overflow"}
-	errUVarintOverflow         = PacketDecodingError{"uvarint overflow"}
-	errInvalidBool             = PacketDecodingError{"invalid bool"}
-	errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"}
-)
-
-type realDecoder struct {
-	raw   []byte
-	off   int
-	stack []pushDecoder
-}
-
-// primitives
-
-func (rd *realDecoder) getInt8() (int8, error) {
-	if rd.remaining() < 1 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int8(rd.raw[rd.off])
-	rd.off++
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt16() (int16, error) {
-	if rd.remaining() < 2 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
-	rd.off += 2
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt32() (int32, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt64() (int64, error) {
-	if rd.remaining() < 8 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
-	rd.off += 8
-	return tmp, nil
-}
-
-func (rd *realDecoder) getVarint() (int64, error) {
-	tmp, n := binary.Varint(rd.raw[rd.off:])
-	if n == 0 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	if n < 0 {
-		rd.off -= n
-		return -1, errVarintOverflow
-	}
-	rd.off += n
-	return tmp, nil
-}
-
-func (rd *realDecoder) getUVarint() (uint64, error) {
-	tmp, n := binary.Uvarint(rd.raw[rd.off:])
-	if n == 0 {
-		rd.off = len(rd.raw)
-		return 0, ErrInsufficientData
-	}
-
-	if n < 0 {
-		rd.off -= n
-		return 0, errUVarintOverflow
-	}
-
-	rd.off += n
-	return tmp, nil
-}
-
-func (rd *realDecoder) getArrayLength() (int, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
-	rd.off += 4
-	if tmp > rd.remaining() {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	} else if tmp > 2*math.MaxUint16 {
-		return -1, errInvalidArrayLength
-	}
-	return tmp, nil
-}
-
-func (rd *realDecoder) getCompactArrayLength() (int, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return 0, err
-	}
-
-	if n == 0 {
-		return 0, nil
-	}
-
-	return int(n) - 1, nil
-}
-
-func (rd *realDecoder) getBool() (bool, error) {
-	b, err := rd.getInt8()
-	if err != nil || b == 0 {
-		return false, err
-	}
-	if b != 1 {
-		return false, errInvalidBool
-	}
-	return true, nil
-}
-
-func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) {
-	tagCount, err := rd.getUVarint()
-	if err != nil {
-		return 0, err
-	}
-
-	if tagCount != 0 {
-		return 0, errUnsupportedTaggedFields
-	}
-
-	return 0, nil
-}
-
-// collections
-
-func (rd *realDecoder) getBytes() ([]byte, error) {
-	tmp, err := rd.getInt32()
-	if err != nil {
-		return nil, err
-	}
-	if tmp == -1 {
-		return nil, nil
-	}
-
-	return rd.getRawBytes(int(tmp))
-}
-
-func (rd *realDecoder) getVarintBytes() ([]byte, error) {
-	tmp, err := rd.getVarint()
-	if err != nil {
-		return nil, err
-	}
-	if tmp == -1 {
-		return nil, nil
-	}
-
-	return rd.getRawBytes(int(tmp))
-}
-
-func (rd *realDecoder) getCompactBytes() ([]byte, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	length := int(n - 1)
-	return rd.getRawBytes(length)
-}
-
-func (rd *realDecoder) getStringLength() (int, error) {
-	length, err := rd.getInt16()
-	if err != nil {
-		return 0, err
-	}
-
-	n := int(length)
-
-	switch {
-	case n < -1:
-		return 0, errInvalidStringLength
-	case n > rd.remaining():
-		rd.off = len(rd.raw)
-		return 0, ErrInsufficientData
-	}
-
-	return n, nil
-}
-
-func (rd *realDecoder) getString() (string, error) {
-	n, err := rd.getStringLength()
-	if err != nil || n == -1 {
-		return "", err
-	}
-
-	tmpStr := string(rd.raw[rd.off : rd.off+n])
-	rd.off += n
-	return tmpStr, nil
-}
-
-func (rd *realDecoder) getNullableString() (*string, error) {
-	n, err := rd.getStringLength()
-	if err != nil || n == -1 {
-		return nil, err
-	}
-
-	tmpStr := string(rd.raw[rd.off : rd.off+n])
-	rd.off += n
-	return &tmpStr, err
-}
-
-func (rd *realDecoder) getCompactString() (string, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return "", err
-	}
-
-	length := int(n - 1)
-
-	tmpStr := string(rd.raw[rd.off : rd.off+length])
-	rd.off += length
-	return tmpStr, nil
-}
-
-func (rd *realDecoder) getCompactNullableString() (*string, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	length := int(n - 1)
-
-	if length < 0 {
-		return nil, err
-	}
-
-	tmpStr := string(rd.raw[rd.off : rd.off+length])
-	rd.off += length
-	return &tmpStr, err
-}
-
-func (rd *realDecoder) getCompactInt32Array() ([]int32, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	arrayLength := int(n) - 1
-
-	ret := make([]int32, arrayLength)
-
-	for i := range ret {
-		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-		rd.off += 4
-	}
-	return ret, nil
-}
-
-func (rd *realDecoder) getInt32Array() ([]int32, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-
-	if rd.remaining() < 4*n {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n < 0 {
-		return nil, errInvalidArrayLength
-	}
-
-	ret := make([]int32, n)
-	for i := range ret {
-		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-		rd.off += 4
-	}
-	return ret, nil
-}
-
-func (rd *realDecoder) getInt64Array() ([]int64, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-
-	if rd.remaining() < 8*n {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n < 0 {
-		return nil, errInvalidArrayLength
-	}
-
-	ret := make([]int64, n)
-	for i := range ret {
-		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
-		rd.off += 8
-	}
-	return ret, nil
-}
-
-func (rd *realDecoder) getStringArray() ([]string, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n < 0 {
-		return nil, errInvalidArrayLength
-	}
-
-	ret := make([]string, n)
-	for i := range ret {
-		str, err := rd.getString()
-		if err != nil {
-			return nil, err
-		}
-
-		ret[i] = str
-	}
-	return ret, nil
-}
-
-// subsets
-
-func (rd *realDecoder) remaining() int {
-	return len(rd.raw) - rd.off
-}
-
-func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
-	buf, err := rd.getRawBytes(length)
-	if err != nil {
-		return nil, err
-	}
-	return &realDecoder{raw: buf}, nil
-}
-
-func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
-	if length < 0 {
-		return nil, errInvalidByteSliceLength
-	} else if length > rd.remaining() {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-
-	start := rd.off
-	rd.off += length
-	return rd.raw[start:rd.off], nil
-}
-
-func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
-	if rd.remaining() < offset+length {
-		return nil, ErrInsufficientData
-	}
-	off := rd.off + offset
-	return &realDecoder{raw: rd.raw[off : off+length]}, nil
-}
-
-func (rd *realDecoder) peekInt8(offset int) (int8, error) {
-	const byteLen = 1
-	if rd.remaining() < offset+byteLen {
-		return -1, ErrInsufficientData
-	}
-	return int8(rd.raw[rd.off+offset]), nil
-}
-
-// stacks
-
-func (rd *realDecoder) push(in pushDecoder) error {
-	in.saveOffset(rd.off)
-
-	var reserve int
-	if dpd, ok := in.(dynamicPushDecoder); ok {
-		if err := dpd.decode(rd); err != nil {
-			return err
-		}
-	} else {
-		reserve = in.reserveLength()
-		if rd.remaining() < reserve {
-			rd.off = len(rd.raw)
-			return ErrInsufficientData
-		}
-	}
-
-	rd.stack = append(rd.stack, in)
-
-	rd.off += reserve
-
-	return nil
-}
-
-func (rd *realDecoder) pop() error {
-	// this is go's ugly pop pattern (the inverse of append)
-	in := rd.stack[len(rd.stack)-1]
-	rd.stack = rd.stack[:len(rd.stack)-1]
-
-	return in.check(rd.off, rd.raw)
-}
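
getVarint and getUVarint above translate the standard library's return convention (n == 0 means the buffer ran out, n < 0 means overflow) into sarama's decoding errors. The convention can be seen in isolation:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutVarint(buf, -300)
	fmt.Println(buf[:n]) // zig-zag encoded: [215 4]

	v, m := binary.Varint(buf[:n])
	fmt.Println(v, m) // -300 2

	// Truncated input: m == 0 means "need more bytes", which realDecoder maps
	// to ErrInsufficientData; m < 0 would mean 64-bit overflow.
	_, m = binary.Varint(buf[:1])
	fmt.Println(m) // 0
}
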
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
deleted file mode 100644
index c07204c..0000000
--- a/vendor/github.com/Shopify/sarama/real_encoder.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-type realEncoder struct {
-	raw      []byte
-	off      int
-	stack    []pushEncoder
-	registry metrics.Registry
-}
-
-// primitives
-
-func (re *realEncoder) putInt8(in int8) {
-	re.raw[re.off] = byte(in)
-	re.off++
-}
-
-func (re *realEncoder) putInt16(in int16) {
-	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
-	re.off += 2
-}
-
-func (re *realEncoder) putInt32(in int32) {
-	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
-	re.off += 4
-}
-
-func (re *realEncoder) putInt64(in int64) {
-	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
-	re.off += 8
-}
-
-func (re *realEncoder) putVarint(in int64) {
-	re.off += binary.PutVarint(re.raw[re.off:], in)
-}
-
-func (re *realEncoder) putUVarint(in uint64) {
-	re.off += binary.PutUvarint(re.raw[re.off:], in)
-}
-
-func (re *realEncoder) putArrayLength(in int) error {
-	re.putInt32(int32(in))
-	return nil
-}
-
-func (re *realEncoder) putCompactArrayLength(in int) {
-	// 0 represents a null array, so +1 has to be added
-	re.putUVarint(uint64(in + 1))
-}
-
-func (re *realEncoder) putBool(in bool) {
-	if in {
-		re.putInt8(1)
-		return
-	}
-	re.putInt8(0)
-}
-
-// collection
-
-func (re *realEncoder) putRawBytes(in []byte) error {
-	copy(re.raw[re.off:], in)
-	re.off += len(in)
-	return nil
-}
-
-func (re *realEncoder) putBytes(in []byte) error {
-	if in == nil {
-		re.putInt32(-1)
-		return nil
-	}
-	re.putInt32(int32(len(in)))
-	return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putVarintBytes(in []byte) error {
-	if in == nil {
-		re.putVarint(-1)
-		return nil
-	}
-	re.putVarint(int64(len(in)))
-	return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putCompactBytes(in []byte) error {
-	re.putUVarint(uint64(len(in) + 1))
-	return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putCompactString(in string) error {
-	re.putCompactArrayLength(len(in))
-	return re.putRawBytes([]byte(in))
-}
-
-func (re *realEncoder) putNullableCompactString(in *string) error {
-	if in == nil {
-		re.putInt8(0)
-		return nil
-	}
-	return re.putCompactString(*in)
-}
-
-func (re *realEncoder) putString(in string) error {
-	re.putInt16(int16(len(in)))
-	copy(re.raw[re.off:], in)
-	re.off += len(in)
-	return nil
-}
-
-func (re *realEncoder) putNullableString(in *string) error {
-	if in == nil {
-		re.putInt16(-1)
-		return nil
-	}
-	return re.putString(*in)
-}
-
-func (re *realEncoder) putStringArray(in []string) error {
-	err := re.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-
-	for _, val := range in {
-		if err := re.putString(val); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (re *realEncoder) putCompactInt32Array(in []int32) error {
-	if in == nil {
-		return errors.New("expected int32 array to be non null")
-	}
-	// 0 represents a null array, so +1 has to be added
-	re.putUVarint(uint64(len(in)) + 1)
-	for _, val := range in {
-		re.putInt32(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putNullableCompactInt32Array(in []int32) error {
-	if in == nil {
-		re.putUVarint(0)
-		return nil
-	}
-	// 0 represents a null array, so +1 has to be added
-	re.putUVarint(uint64(len(in)) + 1)
-	for _, val := range in {
-		re.putInt32(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putInt32Array(in []int32) error {
-	err := re.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	for _, val := range in {
-		re.putInt32(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putInt64Array(in []int64) error {
-	err := re.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	for _, val := range in {
-		re.putInt64(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putEmptyTaggedFieldArray() {
-	re.putUVarint(0)
-}
-
-func (re *realEncoder) offset() int {
-	return re.off
-}
-
-// stacks
-
-func (re *realEncoder) push(in pushEncoder) {
-	in.saveOffset(re.off)
-	re.off += in.reserveLength()
-	re.stack = append(re.stack, in)
-}
-
-func (re *realEncoder) pop() error {
-	// this is go's ugly pop pattern (the inverse of append)
-	in := re.stack[len(re.stack)-1]
-	re.stack = re.stack[:len(re.stack)-1]
-
-	return in.run(re.off, re.raw)
-}
-
-// we do record metrics during the real encoder pass
-func (re *realEncoder) metricRegistry() metrics.Registry {
-	return re.registry
-}
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
deleted file mode 100644
index c653763..0000000
--- a/vendor/github.com/Shopify/sarama/record_batch.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-const recordBatchOverhead = 49
-
-type recordsArray []*Record
-
-func (e recordsArray) encode(pe packetEncoder) error {
-	for _, r := range e {
-		if err := r.encode(pe); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (e recordsArray) decode(pd packetDecoder) error {
-	for i := range e {
-		rec := &Record{}
-		if err := rec.decode(pd); err != nil {
-			return err
-		}
-		e[i] = rec
-	}
-	return nil
-}
-
-type RecordBatch struct {
-	FirstOffset           int64
-	PartitionLeaderEpoch  int32
-	Version               int8
-	Codec                 CompressionCodec
-	CompressionLevel      int
-	Control               bool
-	LogAppendTime         bool
-	LastOffsetDelta       int32
-	FirstTimestamp        time.Time
-	MaxTimestamp          time.Time
-	ProducerID            int64
-	ProducerEpoch         int16
-	FirstSequence         int32
-	Records               []*Record
-	PartialTrailingRecord bool
-	IsTransactional       bool
-
-	compressedRecords []byte
-	recordsLen        int // uncompressed records size
-}
-
-func (b *RecordBatch) LastOffset() int64 {
-	return b.FirstOffset + int64(b.LastOffsetDelta)
-}
-
-func (b *RecordBatch) encode(pe packetEncoder) error {
-	if b.Version != 2 {
-		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
-	}
-	pe.putInt64(b.FirstOffset)
-	pe.push(&lengthField{})
-	pe.putInt32(b.PartitionLeaderEpoch)
-	pe.putInt8(b.Version)
-	pe.push(newCRC32Field(crcCastagnoli))
-	pe.putInt16(b.computeAttributes())
-	pe.putInt32(b.LastOffsetDelta)
-
-	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
-		return err
-	}
-
-	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
-		return err
-	}
-
-	pe.putInt64(b.ProducerID)
-	pe.putInt16(b.ProducerEpoch)
-	pe.putInt32(b.FirstSequence)
-
-	if err := pe.putArrayLength(len(b.Records)); err != nil {
-		return err
-	}
-
-	if b.compressedRecords == nil {
-		if err := b.encodeRecords(pe); err != nil {
-			return err
-		}
-	}
-	if err := pe.putRawBytes(b.compressedRecords); err != nil {
-		return err
-	}
-
-	if err := pe.pop(); err != nil {
-		return err
-	}
-	return pe.pop()
-}
-
-func (b *RecordBatch) decode(pd packetDecoder) (err error) {
-	if b.FirstOffset, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	batchLen, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	if b.Version, err = pd.getInt8(); err != nil {
-		return err
-	}
-
-	crc32Decoder := acquireCrc32Field(crcCastagnoli)
-	defer releaseCrc32Field(crc32Decoder)
-
-	if err = pd.push(crc32Decoder); err != nil {
-		return err
-	}
-
-	attributes, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
-	b.Control = attributes&controlMask == controlMask
-	b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
-	b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
-
-	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
-		return err
-	}
-
-	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
-		return err
-	}
-
-	if b.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	if b.FirstSequence, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	numRecs, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if numRecs >= 0 {
-		b.Records = make([]*Record, numRecs)
-	}
-
-	bufSize := int(batchLen) - recordBatchOverhead
-	recBuffer, err := pd.getRawBytes(bufSize)
-	if err != nil {
-		if err == ErrInsufficientData {
-			b.PartialTrailingRecord = true
-			b.Records = nil
-			return nil
-		}
-		return err
-	}
-
-	if err = pd.pop(); err != nil {
-		return err
-	}
-
-	recBuffer, err = decompress(b.Codec, recBuffer)
-	if err != nil {
-		return err
-	}
-
-	b.recordsLen = len(recBuffer)
-	err = decode(recBuffer, recordsArray(b.Records))
-	if err == ErrInsufficientData {
-		b.PartialTrailingRecord = true
-		b.Records = nil
-		return nil
-	}
-	return err
-}
-
-func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
-	var raw []byte
-	var err error
-	if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
-		return err
-	}
-	b.recordsLen = len(raw)
-
-	b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
-	return err
-}
-
-func (b *RecordBatch) computeAttributes() int16 {
-	attr := int16(b.Codec) & int16(compressionCodecMask)
-	if b.Control {
-		attr |= controlMask
-	}
-	if b.LogAppendTime {
-		attr |= timestampTypeMask
-	}
-	if b.IsTransactional {
-		attr |= isTransactionalMask
-	}
-	return attr
-}
-
-func (b *RecordBatch) addRecord(r *Record) {
-	b.Records = append(b.Records, r)
-}
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
deleted file mode 100644
index f4c5e95..0000000
--- a/vendor/github.com/Shopify/sarama/records.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package sarama
-
-import "fmt"
-
-const (
-	unknownRecords = iota
-	legacyRecords
-	defaultRecords
-
-	magicOffset = 16
-)
-
-// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
-type Records struct {
-	recordsType int
-	MsgSet      *MessageSet
-	RecordBatch *RecordBatch
-}
-
-func newLegacyRecords(msgSet *MessageSet) Records {
-	return Records{recordsType: legacyRecords, MsgSet: msgSet}
-}
-
-func newDefaultRecords(batch *RecordBatch) Records {
-	return Records{recordsType: defaultRecords, RecordBatch: batch}
-}
-
-// setTypeFromFields sets the type of the Records struct depending on which of MsgSet or RecordBatch is non-nil.
-// The first return value indicates whether both fields are nil (in which case the type is left unset).
-// If both fields are non-nil, it returns an error.
-func (r *Records) setTypeFromFields() (bool, error) {
-	if r.MsgSet == nil && r.RecordBatch == nil {
-		return true, nil
-	}
-	if r.MsgSet != nil && r.RecordBatch != nil {
-		return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
-	}
-	r.recordsType = defaultRecords
-	if r.MsgSet != nil {
-		r.recordsType = legacyRecords
-	}
-	return false, nil
-}
-
-func (r *Records) encode(pe packetEncoder) error {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return err
-		}
-	}
-
-	switch r.recordsType {
-	case legacyRecords:
-		if r.MsgSet == nil {
-			return nil
-		}
-		return r.MsgSet.encode(pe)
-	case defaultRecords:
-		if r.RecordBatch == nil {
-			return nil
-		}
-		return r.RecordBatch.encode(pe)
-	}
-
-	return fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) setTypeFromMagic(pd packetDecoder) error {
-	magic, err := magicValue(pd)
-	if err != nil {
-		return err
-	}
-
-	r.recordsType = defaultRecords
-	if magic < 2 {
-		r.recordsType = legacyRecords
-	}
-
-	return nil
-}
-
-func (r *Records) decode(pd packetDecoder) error {
-	if r.recordsType == unknownRecords {
-		if err := r.setTypeFromMagic(pd); err != nil {
-			return err
-		}
-	}
-
-	switch r.recordsType {
-	case legacyRecords:
-		r.MsgSet = &MessageSet{}
-		return r.MsgSet.decode(pd)
-	case defaultRecords:
-		r.RecordBatch = &RecordBatch{}
-		return r.RecordBatch.decode(pd)
-	}
-	return fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) numRecords() (int, error) {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return 0, err
-		}
-	}
-
-	switch r.recordsType {
-	case legacyRecords:
-		if r.MsgSet == nil {
-			return 0, nil
-		}
-		return len(r.MsgSet.Messages), nil
-	case defaultRecords:
-		if r.RecordBatch == nil {
-			return 0, nil
-		}
-		return len(r.RecordBatch.Records), nil
-	}
-	return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isPartial() (bool, error) {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return false, err
-		}
-	}
-
-	switch r.recordsType {
-	case unknownRecords:
-		return false, nil
-	case legacyRecords:
-		if r.MsgSet == nil {
-			return false, nil
-		}
-		return r.MsgSet.PartialTrailingMessage, nil
-	case defaultRecords:
-		if r.RecordBatch == nil {
-			return false, nil
-		}
-		return r.RecordBatch.PartialTrailingRecord, nil
-	}
-	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isControl() (bool, error) {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return false, err
-		}
-	}
-
-	switch r.recordsType {
-	case legacyRecords:
-		return false, nil
-	case defaultRecords:
-		if r.RecordBatch == nil {
-			return false, nil
-		}
-		return r.RecordBatch.Control, nil
-	}
-	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isOverflow() (bool, error) {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return false, err
-		}
-	}
-
-	switch r.recordsType {
-	case unknownRecords:
-		return false, nil
-	case legacyRecords:
-		if r.MsgSet == nil {
-			return false, nil
-		}
-		return r.MsgSet.OverflowMessage, nil
-	case defaultRecords:
-		return false, nil
-	}
-	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func magicValue(pd packetDecoder) (int8, error) {
-	return pd.peekInt8(magicOffset)
-}
-
-func (r *Records) getControlRecord() (ControlRecord, error) {
-	if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 {
-		return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
-	}
-
-	firstRecord := r.RecordBatch.Records[0]
-	controlRecord := ControlRecord{}
-	err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
-	if err != nil {
-		return ControlRecord{}, err
-	}
-
-	return controlRecord, nil
-}
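
setTypeFromMagic works because both wire formats put the magic byte 16 bytes into the payload: offset + message size + CRC for legacy message sets, and base offset + batch length + partition leader epoch for v2 record batches. A standalone sketch of the same peek (helper name hypothetical):

package main

import "fmt"

const magicOffset = 16 // same constant as in records.go

// recordsKind mirrors setTypeFromMagic: magic < 2 means a legacy MessageSet,
// otherwise a default (v2) RecordBatch.
func recordsKind(raw []byte) (string, error) {
	if len(raw) <= magicOffset {
		return "", fmt.Errorf("insufficient data: need at least %d bytes", magicOffset+1)
	}
	if int8(raw[magicOffset]) < 2 {
		return "legacy MessageSet", nil
	}
	return "default RecordBatch", nil
}

func main() {
	batch := make([]byte, 61) // 49 bytes of batch overhead plus a few record bytes
	batch[magicOffset] = 2    // v2 magic byte
	kind, _ := recordsKind(batch)
	fmt.Println(kind) // default RecordBatch
}
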
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
deleted file mode 100644
index d899df5..0000000
--- a/vendor/github.com/Shopify/sarama/request.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-type protocolBody interface {
-	encoder
-	versionedDecoder
-	key() int16
-	version() int16
-	headerVersion() int16
-	requiredVersion() KafkaVersion
-}
-
-type request struct {
-	correlationID int32
-	clientID      string
-	body          protocolBody
-}
-
-func (r *request) encode(pe packetEncoder) error {
-	pe.push(&lengthField{})
-	pe.putInt16(r.body.key())
-	pe.putInt16(r.body.version())
-	pe.putInt32(r.correlationID)
-
-	if r.body.headerVersion() >= 1 {
-		err := pe.putString(r.clientID)
-		if err != nil {
-			return err
-		}
-	}
-
-	if r.body.headerVersion() >= 2 {
-		// we don't use tag headers at the moment so we just put an array length of 0
-		pe.putUVarint(0)
-	}
-
-	err := r.body.encode(pe)
-	if err != nil {
-		return err
-	}
-
-	return pe.pop()
-}
-
-func (r *request) decode(pd packetDecoder) (err error) {
-	key, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	version, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.correlationID, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	r.clientID, err = pd.getString()
-	if err != nil {
-		return err
-	}
-
-	r.body = allocateBody(key, version)
-	if r.body == nil {
-		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
-	}
-
-	if r.body.headerVersion() >= 2 {
-		// tagged field
-		_, err = pd.getUVarint()
-		if err != nil {
-			return err
-		}
-	}
-
-	return r.body.decode(pd, version)
-}
-
-func decodeRequest(r io.Reader) (*request, int, error) {
-	var (
-		bytesRead   int
-		lengthBytes = make([]byte, 4)
-	)
-
-	if _, err := io.ReadFull(r, lengthBytes); err != nil {
-		return nil, bytesRead, err
-	}
-
-	bytesRead += len(lengthBytes)
-	length := int32(binary.BigEndian.Uint32(lengthBytes))
-
-	if length <= 4 || length > MaxRequestSize {
-		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
-	}
-
-	encodedReq := make([]byte, length)
-	if _, err := io.ReadFull(r, encodedReq); err != nil {
-		return nil, bytesRead, err
-	}
-
-	bytesRead += len(encodedReq)
-
-	req := &request{}
-	if err := decode(encodedReq, req); err != nil {
-		return nil, bytesRead, err
-	}
-
-	return req, bytesRead, nil
-}
-
-func allocateBody(key, version int16) protocolBody {
-	switch key {
-	case 0:
-		return &ProduceRequest{}
-	case 1:
-		return &FetchRequest{Version: version}
-	case 2:
-		return &OffsetRequest{Version: version}
-	case 3:
-		return &MetadataRequest{}
-	case 8:
-		return &OffsetCommitRequest{Version: version}
-	case 9:
-		return &OffsetFetchRequest{Version: version}
-	case 10:
-		return &FindCoordinatorRequest{}
-	case 11:
-		return &JoinGroupRequest{}
-	case 12:
-		return &HeartbeatRequest{}
-	case 13:
-		return &LeaveGroupRequest{}
-	case 14:
-		return &SyncGroupRequest{}
-	case 15:
-		return &DescribeGroupsRequest{}
-	case 16:
-		return &ListGroupsRequest{}
-	case 17:
-		return &SaslHandshakeRequest{}
-	case 18:
-		return &ApiVersionsRequest{}
-	case 19:
-		return &CreateTopicsRequest{}
-	case 20:
-		return &DeleteTopicsRequest{}
-	case 21:
-		return &DeleteRecordsRequest{}
-	case 22:
-		return &InitProducerIDRequest{}
-	case 24:
-		return &AddPartitionsToTxnRequest{}
-	case 25:
-		return &AddOffsetsToTxnRequest{}
-	case 26:
-		return &EndTxnRequest{}
-	case 28:
-		return &TxnOffsetCommitRequest{}
-	case 29:
-		return &DescribeAclsRequest{}
-	case 30:
-		return &CreateAclsRequest{}
-	case 31:
-		return &DeleteAclsRequest{}
-	case 32:
-		return &DescribeConfigsRequest{}
-	case 33:
-		return &AlterConfigsRequest{}
-	case 35:
-		return &DescribeLogDirsRequest{}
-	case 36:
-		return &SaslAuthenticateRequest{}
-	case 37:
-		return &CreatePartitionsRequest{}
-	case 42:
-		return &DeleteGroupsRequest{}
-	case 44:
-		return &IncrementalAlterConfigsRequest{}
-	case 45:
-		return &AlterPartitionReassignmentsRequest{}
-	case 46:
-		return &ListPartitionReassignmentsRequest{}
-	case 50:
-		return &DescribeUserScramCredentialsRequest{}
-	case 51:
-		return &AlterUserScramCredentialsRequest{}
-	}
-	return nil
-}
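
As context for the framing that request.encode and decodeRequest implement above — a 4-byte big-endian size prefix followed by the api key, api version, correlation id and a length-prefixed client id — here is a minimal standalone sketch of that header layout. It uses no Sarama types, and the field values are hypothetical:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// buildFrame mirrors the header layout written by request.encode: api key,
// api version, correlation id, length-prefixed client id, all wrapped in a
// 4-byte big-endian size prefix. The request-specific body is omitted.
func buildFrame(apiKey, version int16, correlationID int32, clientID string) []byte {
	var body bytes.Buffer
	binary.Write(&body, binary.BigEndian, apiKey)
	binary.Write(&body, binary.BigEndian, version)
	binary.Write(&body, binary.BigEndian, correlationID)
	binary.Write(&body, binary.BigEndian, int16(len(clientID)))
	body.WriteString(clientID)

	var frame bytes.Buffer
	binary.Write(&frame, binary.BigEndian, int32(body.Len()))
	frame.Write(body.Bytes())
	return frame.Bytes()
}

func main() {
	// 18 is the ApiVersions key in allocateBody above.
	fmt.Printf("% x\n", buildFrame(18, 0, 1, "example-client"))
}
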
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
deleted file mode 100644
index fbcef0b..0000000
--- a/vendor/github.com/Shopify/sarama/response_header.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sarama
-
-import "fmt"
-
-const (
-	responseLengthSize = 4
-	correlationIDSize  = 4
-)
-
-type responseHeader struct {
-	length        int32
-	correlationID int32
-}
-
-func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) {
-	r.length, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if r.length <= 4 || r.length > MaxResponseSize {
-		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
-	}
-
-	r.correlationID, err = pd.getInt32()
-
-	if version >= 1 {
-		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	return err
-}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
deleted file mode 100644
index 48f362d..0000000
--- a/vendor/github.com/Shopify/sarama/sarama.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
-Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
-API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
-API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
-
-To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
-and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
-The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
-useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
-depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
-SyncProducer can still sometimes be lost.
-
-To consume messages, use Consumer or Consumer-Group API.
-
-For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
-and message sent on the wire; the Client provides higher-level metadata management that is shared between
-the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
-exactly with the protocol fields documented by Kafka at
-https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
-
-Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
-
-Broker related metrics:
-
-	+----------------------------------------------+------------+---------------------------------------------------------------+
-	| Name                                         | Type       | Description                                                   |
-	+----------------------------------------------+------------+---------------------------------------------------------------+
-	| incoming-byte-rate                           | meter      | Bytes/second read off all brokers                             |
-	| incoming-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second read off a given broker                          |
-	| outgoing-byte-rate                           | meter      | Bytes/second written off all brokers                          |
-	| outgoing-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second written off a given broker                       |
-	| request-rate                                 | meter      | Requests/second sent to all brokers                           |
-	| request-rate-for-broker-<broker-id>          | meter      | Requests/second sent to a given broker                        |
-	| request-size                                 | histogram  | Distribution of the request size in bytes for all brokers     |
-	| request-size-for-broker-<broker-id>          | histogram  | Distribution of the request size in bytes for a given broker  |
-	| request-latency-in-ms                        | histogram  | Distribution of the request latency in ms for all brokers     |
-	| request-latency-in-ms-for-broker-<broker-id> | histogram  | Distribution of the request latency in ms for a given broker  |
-	| response-rate                                | meter      | Responses/second received from all brokers                    |
-	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
-	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
-	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
-	| requests-in-flight                           | counter    | The current number of in-flight requests awaiting a response  |
-	|                                              |            | for all brokers                                               |
-	| requests-in-flight-for-broker-<broker-id>    | counter    | The current number of in-flight requests awaiting a response  |
-	|                                              |            | for a given broker                                            |
-	+----------------------------------------------+------------+---------------------------------------------------------------+
-
-Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
-
-Producer related metrics:
-
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-	| Name                                      | Type       | Description                                                                          |
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-	| batch-size                                | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
-	| batch-size-for-topic-<topic>              | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
-	| record-send-rate                          | meter      | Records/second sent to all topics                                                    |
-	| record-send-rate-for-topic-<topic>        | meter      | Records/second sent to a given topic                                                 |
-	| records-per-request                       | histogram  | Distribution of the number of records sent per request for all topics                |
-	| records-per-request-for-topic-<topic>     | histogram  | Distribution of the number of records sent per request for a given topic             |
-	| compression-ratio                         | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
-	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-
-Consumer related metrics:
-
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-	| Name                                      | Type       | Description                                                                          |
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-	| consumer-batch-size                       | histogram  | Distribution of the number of messages in a batch                                    |
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-
-*/
-package sarama
-
-import (
-	"io/ioutil"
-	"log"
-)
-
-var (
-	// Logger is the instance of a StdLogger interface that Sarama writes connection
-	// management events to. By default it is set to discard all log messages via ioutil.Discard,
-	// but you can set it to redirect wherever you want.
-	Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
-
-	// PanicHandler is called for recovering from panics spawned internally to the library (and thus
-	// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
-	PanicHandler func(interface{})
-
-	// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
-	// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned
-	// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
-	// to process.
-	MaxRequestSize int32 = 100 * 1024 * 1024
-
-	// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
-	// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
-	// protect the client from running out of memory. Please note that brokers do not have any natural limit on
-	// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
-	// (see https://issues.apache.org/jira/browse/KAFKA-2063).
-	MaxResponseSize int32 = 100 * 1024 * 1024
-)
-
-// StdLogger is used to log error messages.
-type StdLogger interface {
-	Print(v ...interface{})
-	Printf(format string, v ...interface{})
-	Println(v ...interface{})
-}
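
Since the package-level Logger above defaults to discarding everything, applications usually swap it out at startup; a minimal sketch (the prefix and destination are arbitrary choices):

package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Route Sarama's connection-management events to stderr instead of
	// the default ioutil.Discard; any StdLogger implementation works.
	sarama.Logger = log.New(os.Stderr, "[sarama] ", log.LstdFlags)
}
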
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
deleted file mode 100644
index 90504df..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sarama
-
-type SaslAuthenticateRequest struct {
-	SaslAuthBytes []byte
-}
-
-// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API
-const APIKeySASLAuth = 36
-
-func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error {
-	return pe.putBytes(r.SaslAuthBytes)
-}
-
-func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.SaslAuthBytes, err = pd.getBytes()
-	return err
-}
-
-func (r *SaslAuthenticateRequest) key() int16 {
-	return APIKeySASLAuth
-}
-
-func (r *SaslAuthenticateRequest) version() int16 {
-	return 0
-}
-
-func (r *SaslAuthenticateRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
deleted file mode 100644
index 3ef57b5..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package sarama
-
-type SaslAuthenticateResponse struct {
-	Err           KError
-	ErrorMessage  *string
-	SaslAuthBytes []byte
-}
-
-func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	if err := pe.putNullableString(r.ErrorMessage); err != nil {
-		return err
-	}
-	return pe.putBytes(r.SaslAuthBytes)
-}
-
-func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	r.SaslAuthBytes, err = pd.getBytes()
-
-	return err
-}
-
-func (r *SaslAuthenticateResponse) key() int16 {
-	return APIKeySASLAuth
-}
-
-func (r *SaslAuthenticateResponse) version() int16 {
-	return 0
-}
-
-func (r *SaslAuthenticateResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
deleted file mode 100644
index 74dc307..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package sarama
-
-type SaslHandshakeRequest struct {
-	Mechanism string
-	Version   int16
-}
-
-func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.Mechanism); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
-	if r.Mechanism, err = pd.getString(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *SaslHandshakeRequest) key() int16 {
-	return 17
-}
-
-func (r *SaslHandshakeRequest) version() int16 {
-	return r.Version
-}
-
-func (r *SaslHandshakeRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
-	return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
deleted file mode 100644
index 69dfc31..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package sarama
-
-type SaslHandshakeResponse struct {
-	Err               KError
-	EnabledMechanisms []string
-}
-
-func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	return pe.putStringArray(r.EnabledMechanisms)
-}
-
-func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *SaslHandshakeResponse) key() int16 {
-	return 17
-}
-
-func (r *SaslHandshakeResponse) version() int16 {
-	return 0
-}
-
-func (r *SaslHandshakeResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
-	return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
deleted file mode 100644
index ac6ecb1..0000000
--- a/vendor/github.com/Shopify/sarama/sync_group_request.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package sarama
-
-type SyncGroupRequest struct {
-	GroupId          string
-	GenerationId     int32
-	MemberId         string
-	GroupAssignments map[string][]byte
-}
-
-func (r *SyncGroupRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.GroupId); err != nil {
-		return err
-	}
-
-	pe.putInt32(r.GenerationId)
-
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
-		return err
-	}
-	for memberId, memberAssignment := range r.GroupAssignments {
-		if err := pe.putString(memberId); err != nil {
-			return err
-		}
-		if err := pe.putBytes(memberAssignment); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
-	if r.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-	if r.GenerationId, err = pd.getInt32(); err != nil {
-		return
-	}
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.GroupAssignments = make(map[string][]byte)
-	for i := 0; i < n; i++ {
-		memberId, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		memberAssignment, err := pd.getBytes()
-		if err != nil {
-			return err
-		}
-
-		r.GroupAssignments[memberId] = memberAssignment
-	}
-
-	return nil
-}
-
-func (r *SyncGroupRequest) key() int16 {
-	return 14
-}
-
-func (r *SyncGroupRequest) version() int16 {
-	return 0
-}
-
-func (r *SyncGroupRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
-
-func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
-	if r.GroupAssignments == nil {
-		r.GroupAssignments = make(map[string][]byte)
-	}
-
-	r.GroupAssignments[memberId] = memberAssignment
-}
-
-func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
-	bin, err := encode(memberAssignment, nil)
-	if err != nil {
-		return err
-	}
-
-	r.AddGroupAssignment(memberId, bin)
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
deleted file mode 100644
index af019c4..0000000
--- a/vendor/github.com/Shopify/sarama/sync_group_response.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package sarama
-
-type SyncGroupResponse struct {
-	Err              KError
-	MemberAssignment []byte
-}
-
-func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
-	assignment := new(ConsumerGroupMemberAssignment)
-	err := decode(r.MemberAssignment, assignment)
-	return assignment, err
-}
-
-func (r *SyncGroupResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	return pe.putBytes(r.MemberAssignment)
-}
-
-func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	r.MemberAssignment, err = pd.getBytes()
-	return
-}
-
-func (r *SyncGroupResponse) key() int16 {
-	return 14
-}
-
-func (r *SyncGroupResponse) version() int16 {
-	return 0
-}
-
-func (r *SyncGroupResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
deleted file mode 100644
index 021c5a0..0000000
--- a/vendor/github.com/Shopify/sarama/sync_producer.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package sarama
-
-import "sync"
-
-// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
-// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
-// to avoid leaks, it may not be garbage-collected automatically when it passes out of scope.
-//
-// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
-// durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`.
-// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
-//
-// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
-// be set to true in its configuration.
-type SyncProducer interface {
-
-	// SendMessage produces a given message, and returns only when it either has
-	// succeeded or failed to produce. It will return the partition and the offset
-	// of the produced message, or an error if the message failed to produce.
-	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
-
-	// SendMessages produces a given set of messages, and returns only when all
-	// messages in the set have either succeeded or failed. Note that messages
-	// can succeed and fail individually; if some succeed and some fail,
-	// SendMessages will return an error.
-	SendMessages(msgs []*ProducerMessage) error
-
-	// Close shuts down the producer and waits for any buffered messages to be
-	// flushed. You must call this function before a producer object passes out of
-	// scope, as it may otherwise leak memory. You must call this before calling
-	// Close on the underlying client.
-	Close() error
-}
-
-type syncProducer struct {
-	producer *asyncProducer
-	wg       sync.WaitGroup
-}
-
-// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
-func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
-	if config == nil {
-		config = NewConfig()
-		config.Producer.Return.Successes = true
-	}
-
-	if err := verifyProducerConfig(config); err != nil {
-		return nil, err
-	}
-
-	p, err := NewAsyncProducer(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
-	if err := verifyProducerConfig(client.Config()); err != nil {
-		return nil, err
-	}
-
-	p, err := NewAsyncProducerFromClient(client)
-	if err != nil {
-		return nil, err
-	}
-	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
-	sp := &syncProducer{producer: p}
-
-	sp.wg.Add(2)
-	go withRecover(sp.handleSuccesses)
-	go withRecover(sp.handleErrors)
-
-	return sp
-}
-
-func verifyProducerConfig(config *Config) error {
-	if !config.Producer.Return.Errors {
-		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
-	}
-	if !config.Producer.Return.Successes {
-		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
-	}
-	return nil
-}
-
-func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
-	expectation := make(chan *ProducerError, 1)
-	msg.expectation = expectation
-	sp.producer.Input() <- msg
-
-	if err := <-expectation; err != nil {
-		return -1, -1, err.Err
-	}
-
-	return msg.Partition, msg.Offset, nil
-}
-
-func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
-	expectations := make(chan chan *ProducerError, len(msgs))
-	go func() {
-		for _, msg := range msgs {
-			expectation := make(chan *ProducerError, 1)
-			msg.expectation = expectation
-			sp.producer.Input() <- msg
-			expectations <- expectation
-		}
-		close(expectations)
-	}()
-
-	var errors ProducerErrors
-	for expectation := range expectations {
-		if err := <-expectation; err != nil {
-			errors = append(errors, err)
-		}
-	}
-
-	if len(errors) > 0 {
-		return errors
-	}
-	return nil
-}
-
-func (sp *syncProducer) handleSuccesses() {
-	defer sp.wg.Done()
-	for msg := range sp.producer.Successes() {
-		expectation := msg.expectation
-		expectation <- nil
-	}
-}
-
-func (sp *syncProducer) handleErrors() {
-	defer sp.wg.Done()
-	for err := range sp.producer.Errors() {
-		expectation := err.Msg.expectation
-		expectation <- err
-	}
-}
-
-func (sp *syncProducer) Close() error {
-	sp.producer.AsyncClose()
-	sp.wg.Wait()
-	return nil
-}
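
As a usage sketch for the SyncProducer contract described above — both Return flags enabled, Close called before shutdown — with a placeholder broker address and topic:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// verifyProducerConfig requires both of these for a SyncProducer.
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
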
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
deleted file mode 100644
index c4043a3..0000000
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-type TxnOffsetCommitRequest struct {
-	TransactionalID string
-	GroupID         string
-	ProducerID      int64
-	ProducerEpoch   int16
-	Topics          map[string][]*PartitionOffsetMetadata
-}
-
-func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(t.TransactionalID); err != nil {
-		return err
-	}
-	if err := pe.putString(t.GroupID); err != nil {
-		return err
-	}
-	pe.putInt64(t.ProducerID)
-	pe.putInt16(t.ProducerEpoch)
-
-	if err := pe.putArrayLength(len(t.Topics)); err != nil {
-		return err
-	}
-	for topic, partitions := range t.Topics {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for _, partition := range partitions {
-			if err := partition.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
-	if t.TransactionalID, err = pd.getString(); err != nil {
-		return err
-	}
-	if t.GroupID, err = pd.getString(); err != nil {
-		return err
-	}
-	if t.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if t.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	t.Topics = make(map[string][]*PartitionOffsetMetadata)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		m, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
-
-		for j := 0; j < m; j++ {
-			partitionOffsetMetadata := new(PartitionOffsetMetadata)
-			if err := partitionOffsetMetadata.decode(pd, version); err != nil {
-				return err
-			}
-			t.Topics[topic][j] = partitionOffsetMetadata
-		}
-	}
-
-	return nil
-}
-
-func (a *TxnOffsetCommitRequest) key() int16 {
-	return 28
-}
-
-func (a *TxnOffsetCommitRequest) version() int16 {
-	return 0
-}
-
-func (a *TxnOffsetCommitRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-type PartitionOffsetMetadata struct {
-	Partition int32
-	Offset    int64
-	Metadata  *string
-}
-
-func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
-	pe.putInt32(p.Partition)
-	pe.putInt64(p.Offset)
-	if err := pe.putNullableString(p.Metadata); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
-	if p.Partition, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if p.Offset, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if p.Metadata, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
deleted file mode 100644
index 94d8029..0000000
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-type TxnOffsetCommitResponse struct {
-	ThrottleTime time.Duration
-	Topics       map[string][]*PartitionError
-}
-
-func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(t.ThrottleTime / time.Millisecond))
-	if err := pe.putArrayLength(len(t.Topics)); err != nil {
-		return err
-	}
-
-	for topic, e := range t.Topics {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(e)); err != nil {
-			return err
-		}
-		for _, partitionError := range e {
-			if err := partitionError.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	t.Topics = make(map[string][]*PartitionError)
-
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		m, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		t.Topics[topic] = make([]*PartitionError, m)
-
-		for j := 0; j < m; j++ {
-			t.Topics[topic][j] = new(PartitionError)
-			if err := t.Topics[topic][j].decode(pd, version); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (a *TxnOffsetCommitResponse) key() int16 {
-	return 28
-}
-
-func (a *TxnOffsetCommitResponse) version() int16 {
-	return 0
-}
-
-func (a *TxnOffsetCommitResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
deleted file mode 100644
index 1859d29..0000000
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package sarama
-
-import (
-	"bufio"
-	"fmt"
-	"net"
-	"regexp"
-)
-
-type none struct{}
-
-// make []int32 sortable so we can sort partition numbers
-type int32Slice []int32
-
-func (slice int32Slice) Len() int {
-	return len(slice)
-}
-
-func (slice int32Slice) Less(i, j int) bool {
-	return slice[i] < slice[j]
-}
-
-func (slice int32Slice) Swap(i, j int) {
-	slice[i], slice[j] = slice[j], slice[i]
-}
-
-func dupInt32Slice(input []int32) []int32 {
-	ret := make([]int32, 0, len(input))
-	ret = append(ret, input...)
-	return ret
-}
-
-func withRecover(fn func()) {
-	defer func() {
-		handler := PanicHandler
-		if handler != nil {
-			if err := recover(); err != nil {
-				handler(err)
-			}
-		}
-	}()
-
-	fn()
-}
-
-func safeAsyncClose(b *Broker) {
-	tmp := b // local var prevents clobbering in goroutine
-	go withRecover(func() {
-		if connected, _ := tmp.Connected(); connected {
-			if err := tmp.Close(); err != nil {
-				Logger.Println("Error closing broker", tmp.ID(), ":", err)
-			}
-		}
-	})
-}
-
-// Encoder is a simple interface for any type that can be encoded as an array of bytes
-// in order to be sent as the key or value of a Kafka message. Length() is provided as an
-// optimization, and must return the same as len() on the result of Encode().
-type Encoder interface {
-	Encode() ([]byte, error)
-	Length() int
-}
-
-// make strings and byte slices encodable for convenience so they can be used as keys
-// and/or values in kafka messages
-
-// StringEncoder implements the Encoder interface for Go strings so that they can be used
-// as the Key or Value in a ProducerMessage.
-type StringEncoder string
-
-func (s StringEncoder) Encode() ([]byte, error) {
-	return []byte(s), nil
-}
-
-func (s StringEncoder) Length() int {
-	return len(s)
-}
-
-// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
-// as the Key or Value in a ProducerMessage.
-type ByteEncoder []byte
-
-func (b ByteEncoder) Encode() ([]byte, error) {
-	return b, nil
-}
-
-func (b ByteEncoder) Length() int {
-	return len(b)
-}
-
-// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
-// reads that trigger syscalls.
-type bufConn struct {
-	net.Conn
-	buf *bufio.Reader
-}
-
-func newBufConn(conn net.Conn) *bufConn {
-	return &bufConn{
-		Conn: conn,
-		buf:  bufio.NewReader(conn),
-	}
-}
-
-func (bc *bufConn) Read(b []byte) (n int, err error) {
-	return bc.buf.Read(b)
-}
-
-// KafkaVersion instances represent versions of the upstream Kafka broker.
-type KafkaVersion struct {
-	// it's a struct rather than just typing the array directly to make it opaque and stop people
-	// generating their own arbitrary versions
-	version [4]uint
-}
-
-func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
-	return KafkaVersion{
-		version: [4]uint{major, minor, veryMinor, patch},
-	}
-}
-
-// IsAtLeast return true if and only if the version it is called on is
-// greater than or equal to the version passed in:
-//    V1.IsAtLeast(V2) // false
-//    V2.IsAtLeast(V1) // true
-func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
-	for i := range v.version {
-		if v.version[i] > other.version[i] {
-			return true
-		} else if v.version[i] < other.version[i] {
-			return false
-		}
-	}
-	return true
-}
-
-// Effective constants defining the supported kafka versions.
-var (
-	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
-	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
-	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
-	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
-	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
-	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
-	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
-	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
-	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
-	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
-	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
-	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
-	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
-	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
-	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
-	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
-	V1_1_1_0  = newKafkaVersion(1, 1, 1, 0)
-	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
-	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
-	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
-	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
-	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
-	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
-	V2_5_0_0  = newKafkaVersion(2, 5, 0, 0)
-	V2_6_0_0  = newKafkaVersion(2, 6, 0, 0)
-	V2_7_0_0  = newKafkaVersion(2, 7, 0, 0)
-	V2_8_0_0  = newKafkaVersion(2, 8, 0, 0)
-
-	SupportedVersions = []KafkaVersion{
-		V0_8_2_0,
-		V0_8_2_1,
-		V0_8_2_2,
-		V0_9_0_0,
-		V0_9_0_1,
-		V0_10_0_0,
-		V0_10_0_1,
-		V0_10_1_0,
-		V0_10_1_1,
-		V0_10_2_0,
-		V0_10_2_1,
-		V0_11_0_0,
-		V0_11_0_1,
-		V0_11_0_2,
-		V1_0_0_0,
-		V1_1_0_0,
-		V1_1_1_0,
-		V2_0_0_0,
-		V2_0_1_0,
-		V2_1_0_0,
-		V2_2_0_0,
-		V2_3_0_0,
-		V2_4_0_0,
-		V2_5_0_0,
-		V2_6_0_0,
-		V2_7_0_0,
-		V2_8_0_0,
-	}
-	MinVersion     = V0_8_2_0
-	MaxVersion     = V2_8_0_0
-	DefaultVersion = V1_0_0_0
-)
-
-// ParseKafkaVersion parses and returns kafka version or error from a string
-func ParseKafkaVersion(s string) (KafkaVersion, error) {
-	if len(s) < 5 {
-		return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
-	}
-	var major, minor, veryMinor, patch uint
-	var err error
-	if s[0] == '0' {
-		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
-	} else {
-		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
-	}
-	if err != nil {
-		return DefaultVersion, err
-	}
-	return newKafkaVersion(major, minor, veryMinor, patch), nil
-}
-
-func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
-	if !regexp.MustCompile(pattern).MatchString(s) {
-		return fmt.Errorf("invalid version `%s`", s)
-	}
-	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
-	return err
-}
-
-func (v KafkaVersion) String() string {
-	if v.version[0] == 0 {
-		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
-	}
-
-	return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
-}
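
The version helpers above are mostly used to gate protocol features (for example via Config.Version); a small sketch of parsing and comparison:

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	v, err := sarama.ParseKafkaVersion("2.6.0")
	if err != nil {
		log.Fatal(err)
	}
	// IsAtLeast compares the four version components in order, so this
	// reports true because 2.6.0 >= 0.11.0.0.
	fmt.Println(v.IsAtLeast(sarama.V0_11_0_0))
	fmt.Println(v) // prints "2.6.0" via KafkaVersion.String
}
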
diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go
deleted file mode 100644
index e23bfc4..0000000
--- a/vendor/github.com/Shopify/sarama/zstd.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package sarama
-
-import (
-	"github.com/klauspost/compress/zstd"
-)
-
-var (
-	zstdDec, _ = zstd.NewReader(nil)
-	zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true))
-)
-
-func zstdDecompress(dst, src []byte) ([]byte, error) {
-	return zstdDec.DecodeAll(src, dst)
-}
-
-func zstdCompress(dst, src []byte) ([]byte, error) {
-	return zstdEnc.EncodeAll(src, dst), nil
-}
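
The two helpers above are thin wrappers over klauspost/compress; driven directly, the same calls look roughly like this (the payload is arbitrary):

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Mirror the package-level encoder/decoder set up in zstd.go.
	enc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true))
	dec, _ := zstd.NewReader(nil)

	compressed := enc.EncodeAll([]byte("some payload"), nil)
	decompressed, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decompressed))
}
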
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 0000000..339177b
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000..1602287
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000..d7d14f8
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+	"math"
+	"sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+	Value float64 `json:",string"`
+	Width float64 `json:",string"`
+	Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int           { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * r
+	}
+	return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * (s.n - r)
+	}
+	return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
+			} else {
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed qth percentiles value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(math.Ceil(float64(l) * q))
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying streams samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+	return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+	s.maybeSort()
+	s.stream.merge(s.b)
+	s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+	if !s.sorted {
+		s.sorted = true
+		sort.Sort(s.b)
+	}
+}
+
+func (s *Stream) flushed() bool {
+	return len(s.stream.l) > 0
+}
+
+type stream struct {
+	n float64
+	l []Sample
+	ƒ invariant
+}
+
+func (s *stream) reset() {
+	s.l = s.l[:0]
+	s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+	s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+	// TODO(beorn7): This tries to merge not only individual samples, but
+	// whole summaries. The paper doesn't mention merging summaries at
+	// all. Unittests show that the merging is inaccurate. Find out how to
+	// do merges properly.
+	var r float64
+	i := 0
+	for _, sample := range samples {
+		for ; i < len(s.l); i++ {
+			c := s.l[i]
+			if c.Value > sample.Value {
+				// Insert at position i.
+				s.l = append(s.l, Sample{})
+				copy(s.l[i+1:], s.l[i:])
+				s.l[i] = Sample{
+					sample.Value,
+					sample.Width,
+					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+					// TODO(beorn7): How to calculate delta correctly?
+				}
+				i++
+				goto inserted
+			}
+			r += c.Width
+		}
+		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+		i++
+	inserted:
+		s.n += sample.Width
+		r += sample.Width
+	}
+	s.compress()
+}
+
+func (s *stream) count() int {
+	return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+	t := math.Ceil(q * s.n)
+	t += math.Ceil(s.ƒ(s, t) / 2)
+	p := s.l[0]
+	var r float64
+	for _, c := range s.l[1:] {
+		r += p.Width
+		if r+c.Width+c.Delta > t {
+			return p.Value
+		}
+		p = c
+	}
+	return p.Value
+}
+
+func (s *stream) compress() {
+	if len(s.l) < 2 {
+		return
+	}
+	x := s.l[len(s.l)-1]
+	xi := len(s.l) - 1
+	r := s.n - 1 - x.Width
+
+	for i := len(s.l) - 2; i >= 0; i-- {
+		c := s.l[i]
+		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+			x.Width += c.Width
+			s.l[xi] = x
+			// Remove element at i.
+			copy(s.l[i:], s.l[i+1:])
+			s.l = s.l[:len(s.l)-1]
+			xi -= 1
+		} else {
+			x = c
+			xi = i
+		}
+		r -= c.Width
+	}
+}
+
+func (s *stream) samples() Samples {
+	samples := make(Samples, len(s.l))
+	copy(samples, s.l)
+	return samples
+}
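For context on the quantile stream added above, here is a minimal usage sketch (not part of this change; the import path and target quantiles are assumptions based on where this file is commonly vendored from). It fills a targeted stream and queries the registered quantiles; Merge is deliberately avoided given the caveat documented on it.

```go
package main

import (
	"fmt"
	"math/rand"

	// Import path assumed; this is where the stream implementation shown
	// in the diff above is typically vendored from.
	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the 50th, 90th and 99th percentiles with the given absolute errors.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})
	for i := 0; i < 10000; i++ {
		q.Insert(rand.Float64())
	}
	// Query returns an unspecified result for quantiles not registered above.
	fmt.Println("p50:  ", q.Query(0.50))
	fmt.Println("p99:  ", q.Query(0.99))
	fmt.Println("count:", q.Count())
}
```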
diff --git a/vendor/github.com/bsm/sarama-cluster/.gitignore b/vendor/github.com/bsm/sarama-cluster/.gitignore
deleted file mode 100644
index 88113c5..0000000
--- a/vendor/github.com/bsm/sarama-cluster/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.log
-*.pid
-kafka*/
-vendor/
diff --git a/vendor/github.com/bsm/sarama-cluster/.travis.yml b/vendor/github.com/bsm/sarama-cluster/.travis.yml
deleted file mode 100644
index 07c7c10..0000000
--- a/vendor/github.com/bsm/sarama-cluster/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-sudo: false
-language: go
-go:
-  - 1.10.x
-  - 1.9.x
-install:
-  - go get -u github.com/golang/dep/cmd/dep
-  - dep ensure
-env:
-  - SCALA_VERSION=2.12 KAFKA_VERSION=0.11.0.1
-  - SCALA_VERSION=2.12 KAFKA_VERSION=1.0.1
-  - SCALA_VERSION=2.12 KAFKA_VERSION=1.1.0
-script:
-  - make default test-race
-addons:
-  apt:
-    packages:
-      - oracle-java8-set-default
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
deleted file mode 100644
index e1bc110..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
+++ /dev/null
@@ -1,151 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  name = "github.com/Shopify/sarama"
-  packages = ["."]
-  revision = "35324cf48e33d8260e1c7c18854465a904ade249"
-  version = "v1.17.0"
-
-[[projects]]
-  name = "github.com/davecgh/go-spew"
-  packages = ["spew"]
-  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
-  version = "v1.1.0"
-
-[[projects]]
-  name = "github.com/eapache/go-resiliency"
-  packages = ["breaker"]
-  revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
-  version = "v1.1.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/eapache/go-xerial-snappy"
-  packages = ["."]
-  revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
-
-[[projects]]
-  name = "github.com/eapache/queue"
-  packages = ["."]
-  revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
-  version = "v1.1.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/golang/snappy"
-  packages = ["."]
-  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
-
-[[projects]]
-  name = "github.com/onsi/ginkgo"
-  packages = [
-    ".",
-    "config",
-    "extensions/table",
-    "internal/codelocation",
-    "internal/containernode",
-    "internal/failer",
-    "internal/leafnodes",
-    "internal/remote",
-    "internal/spec",
-    "internal/spec_iterator",
-    "internal/specrunner",
-    "internal/suite",
-    "internal/testingtproxy",
-    "internal/writer",
-    "reporters",
-    "reporters/stenographer",
-    "reporters/stenographer/support/go-colorable",
-    "reporters/stenographer/support/go-isatty",
-    "types"
-  ]
-  revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf"
-  version = "v1.5.0"
-
-[[projects]]
-  name = "github.com/onsi/gomega"
-  packages = [
-    ".",
-    "format",
-    "internal/assertion",
-    "internal/asyncassertion",
-    "internal/oraclematcher",
-    "internal/testingtsupport",
-    "matchers",
-    "matchers/support/goraph/bipartitegraph",
-    "matchers/support/goraph/edge",
-    "matchers/support/goraph/node",
-    "matchers/support/goraph/util",
-    "types"
-  ]
-  revision = "62bff4df71bdbc266561a0caee19f0594b17c240"
-  version = "v1.4.0"
-
-[[projects]]
-  name = "github.com/pierrec/lz4"
-  packages = [
-    ".",
-    "internal/xxh32"
-  ]
-  revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
-  version = "v2.0.2"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/rcrowley/go-metrics"
-  packages = ["."]
-  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/net"
-  packages = [
-    "html",
-    "html/atom",
-    "html/charset"
-  ]
-  revision = "afe8f62b1d6bbd81f31868121a50b06d8188e1f9"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/sys"
-  packages = ["unix"]
-  revision = "63fc586f45fe72d95d5240a5d5eb95e6503907d3"
-
-[[projects]]
-  name = "golang.org/x/text"
-  packages = [
-    "encoding",
-    "encoding/charmap",
-    "encoding/htmlindex",
-    "encoding/internal",
-    "encoding/internal/identifier",
-    "encoding/japanese",
-    "encoding/korean",
-    "encoding/simplifiedchinese",
-    "encoding/traditionalchinese",
-    "encoding/unicode",
-    "internal/gen",
-    "internal/tag",
-    "internal/utf8internal",
-    "language",
-    "runes",
-    "transform",
-    "unicode/cldr"
-  ]
-  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
-  version = "v0.3.0"
-
-[[projects]]
-  name = "gopkg.in/yaml.v2"
-  packages = ["."]
-  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
-  version = "v2.2.1"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "2fa33a2d1ae87e0905ef09332bb4b3fda29179f6bcd48fd3b94070774b9e458b"
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
deleted file mode 100644
index 1eecfef..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#  name = "github.com/x/y"
-#  version = "2.4.0"
-
-
-[[constraint]]
-  name = "github.com/Shopify/sarama"
-  version = "^1.14.0"
diff --git a/vendor/github.com/bsm/sarama-cluster/LICENSE b/vendor/github.com/bsm/sarama-cluster/LICENSE
deleted file mode 100644
index 127751c..0000000
--- a/vendor/github.com/bsm/sarama-cluster/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2017 Black Square Media Ltd
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/bsm/sarama-cluster/Makefile b/vendor/github.com/bsm/sarama-cluster/Makefile
deleted file mode 100644
index 25c5bc2..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-SCALA_VERSION?= 2.12
-KAFKA_VERSION?= 1.1.0
-KAFKA_DIR= kafka_$(SCALA_VERSION)-$(KAFKA_VERSION)
-KAFKA_SRC= https://archive.apache.org/dist/kafka/$(KAFKA_VERSION)/$(KAFKA_DIR).tgz
-KAFKA_ROOT= testdata/$(KAFKA_DIR)
-PKG=$(shell go list ./... | grep -v vendor)
-
-default: vet test
-
-vet:
-	go vet $(PKG)
-
-test: testdeps
-	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60
-
-test-verbose: testdeps
-	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v
-
-test-race: testdeps
-	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v -race
-
-testdeps: $(KAFKA_ROOT)
-
-doc: README.md
-
-.PHONY: test testdeps vet doc
-
-# ---------------------------------------------------------------------
-
-$(KAFKA_ROOT):
-	@mkdir -p $(dir $@)
-	cd $(dir $@) && curl -sSL $(KAFKA_SRC) | tar xz
-
-README.md: README.md.tpl $(wildcard *.go)
-	becca -package $(subst $(GOPATH)/src/,,$(PWD))
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md b/vendor/github.com/bsm/sarama-cluster/README.md
deleted file mode 100644
index ebcd755..0000000
--- a/vendor/github.com/bsm/sarama-cluster/README.md
+++ /dev/null
@@ -1,151 +0,0 @@
-# Sarama Cluster
-
-[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
-[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
-[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
-[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
-
-Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
-
-## Documentation
-
-Documentation and example are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
-
-## Examples
-
-Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) from multiple
-topics and partitions are all passed to a single channel:
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"os/signal"
-
-	cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {
-
-	// init (custom) config, enable errors and notifications
-	config := cluster.NewConfig()
-	config.Consumer.Return.Errors = true
-	config.Group.Return.Notifications = true
-
-	// init consumer
-	brokers := []string{"127.0.0.1:9092"}
-	topics := []string{"my_topic", "other_topic"}
-	consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
-	if err != nil {
-		panic(err)
-	}
-	defer consumer.Close()
-
-	// trap SIGINT to trigger a shutdown.
-	signals := make(chan os.Signal, 1)
-	signal.Notify(signals, os.Interrupt)
-
-	// consume errors
-	go func() {
-		for err := range consumer.Errors() {
-			log.Printf("Error: %s\n", err.Error())
-		}
-	}()
-
-	// consume notifications
-	go func() {
-		for ntf := range consumer.Notifications() {
-			log.Printf("Rebalanced: %+v\n", ntf)
-		}
-	}()
-
-	// consume messages, watch signals
-	for {
-		select {
-		case msg, ok := <-consumer.Messages():
-			if ok {
-				fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
-				consumer.MarkOffset(msg, "")	// mark message as processed
-			}
-		case <-signals:
-			return
-		}
-	}
-}
-```
-
-Users who require access to individual partitions can use the partitioned mode which exposes access to partition-level
-consumers:
-
-```go
-package main
-
-import (
-  "fmt"
-  "log"
-  "os"
-  "os/signal"
-
-  cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {
-
-	// init (custom) config, set mode to ConsumerModePartitions
-	config := cluster.NewConfig()
-	config.Group.Mode = cluster.ConsumerModePartitions
-
-	// init consumer
-	brokers := []string{"127.0.0.1:9092"}
-	topics := []string{"my_topic", "other_topic"}
-	consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
-	if err != nil {
-		panic(err)
-	}
-	defer consumer.Close()
-
-	// trap SIGINT to trigger a shutdown.
-	signals := make(chan os.Signal, 1)
-	signal.Notify(signals, os.Interrupt)
-
-	// consume partitions
-	for {
-		select {
-		case part, ok := <-consumer.Partitions():
-			if !ok {
-				return
-			}
-
-			// start a separate goroutine to consume messages
-			go func(pc cluster.PartitionConsumer) {
-				for msg := range pc.Messages() {
-					fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
-					consumer.MarkOffset(msg, "")	// mark message as processed
-				}
-			}(part)
-		case <-signals:
-			return
-		}
-	}
-}
-```
-
-## Running tests
-
-You need to install Ginkgo & Gomega to run tests. Please see
-http://onsi.github.io/ginkgo for more details.
-
-To run tests, call:
-
-	$ make test
-
-## Troubleshooting
-
-### Consumer not receiving any messages?
-
-By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that in the event that a brand new consumer is created, and it has never committed any offsets to kafka, it will only receive messages starting from the message after the current one that was written.
-
-If you wish to receive all messages (from the start of all messages in the topic) in the event that a consumer does not have any offsets committed to kafka, you need to set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
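As a companion to the troubleshooting note above, here is a minimal sketch (reusing the broker address, group, and topic names from the README examples; purely illustrative) of configuring the removed sarama-cluster consumer to start from the oldest offset when the group has no committed offsets:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	// Start from the oldest available offset when the group has no committed offsets.
	config := cluster.NewConfig()
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, config)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	for msg := range consumer.Messages() {
		fmt.Printf("%s/%d/%d\n", msg.Topic, msg.Partition, msg.Offset)
		consumer.MarkOffset(msg, "") // mark message as processed
	}
}
```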
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md.tpl b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
deleted file mode 100644
index 5f63a69..0000000
--- a/vendor/github.com/bsm/sarama-cluster/README.md.tpl
+++ /dev/null
@@ -1,67 +0,0 @@
-# Sarama Cluster
-
-[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
-[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
-[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
-[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
-
-Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
-
-## Documentation
-
-Documentation and example are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
-
-## Examples
-
-Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) from multiple
-topics and partitions are all passed to a single channel:
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"os/signal"
-
-	cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {{ "ExampleConsumer" | code }}
-```
-
-Users who require access to individual partitions can use the partitioned mode which exposes access to partition-level
-consumers:
-
-```go
-package main
-
-import (
-  "fmt"
-  "log"
-  "os"
-  "os/signal"
-
-  cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {{ "ExampleConsumer_Partitions" | code }}
-```
-
-## Running tests
-
-You need to install Ginkgo & Gomega to run tests. Please see
-http://onsi.github.io/ginkgo for more details.
-
-To run tests, call:
-
-	$ make test
-
-## Troubleshooting
-
-### Consumer not receiving any messages?
-
-By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that in the event that a brand new consumer is created, and it has never committed any offsets to kafka, it will only receive messages starting from the message after the current one that was written.
-
-If you wish to receive all messages (from the start of all messages in the topic) in the event that a consumer does not have any offsets committed to kafka, you need to set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
diff --git a/vendor/github.com/bsm/sarama-cluster/balancer.go b/vendor/github.com/bsm/sarama-cluster/balancer.go
deleted file mode 100644
index 3aeaece..0000000
--- a/vendor/github.com/bsm/sarama-cluster/balancer.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package cluster
-
-import (
-	"math"
-	"sort"
-
-	"github.com/Shopify/sarama"
-)
-
-// NotificationType defines the type of notification
-type NotificationType uint8
-
-// String describes the notification type
-func (t NotificationType) String() string {
-	switch t {
-	case RebalanceStart:
-		return "rebalance start"
-	case RebalanceOK:
-		return "rebalance OK"
-	case RebalanceError:
-		return "rebalance error"
-	}
-	return "unknown"
-}
-
-const (
-	UnknownNotification NotificationType = iota
-	RebalanceStart
-	RebalanceOK
-	RebalanceError
-)
-
-// Notification is a state event emitted by the consumers on rebalance
-type Notification struct {
-	// Type exposes the notification type
-	Type NotificationType
-
-	// Claimed contains topic/partitions that were claimed by this rebalance cycle
-	Claimed map[string][]int32
-
-	// Released contains topic/partitions that were released as part of this rebalance cycle
-	Released map[string][]int32
-
-	// Current are topic/partitions that are currently claimed by the consumer
-	Current map[string][]int32
-}
-
-func newNotification(current map[string][]int32) *Notification {
-	return &Notification{
-		Type:    RebalanceStart,
-		Current: current,
-	}
-}
-
-func (n *Notification) success(current map[string][]int32) *Notification {
-	o := &Notification{
-		Type:     RebalanceOK,
-		Claimed:  make(map[string][]int32),
-		Released: make(map[string][]int32),
-		Current:  current,
-	}
-	for topic, partitions := range current {
-		o.Claimed[topic] = int32Slice(partitions).Diff(int32Slice(n.Current[topic]))
-	}
-	for topic, partitions := range n.Current {
-		o.Released[topic] = int32Slice(partitions).Diff(int32Slice(current[topic]))
-	}
-	return o
-}
-
-// --------------------------------------------------------------------
-
-type topicInfo struct {
-	Partitions []int32
-	MemberIDs  []string
-}
-
-func (info topicInfo) Perform(s Strategy) map[string][]int32 {
-	if s == StrategyRoundRobin {
-		return info.RoundRobin()
-	}
-	return info.Ranges()
-}
-
-func (info topicInfo) Ranges() map[string][]int32 {
-	sort.Strings(info.MemberIDs)
-
-	mlen := len(info.MemberIDs)
-	plen := len(info.Partitions)
-	res := make(map[string][]int32, mlen)
-
-	for pos, memberID := range info.MemberIDs {
-		n, i := float64(plen)/float64(mlen), float64(pos)
-		min := int(math.Floor(i*n + 0.5))
-		max := int(math.Floor((i+1)*n + 0.5))
-		sub := info.Partitions[min:max]
-		if len(sub) > 0 {
-			res[memberID] = sub
-		}
-	}
-	return res
-}
-
-func (info topicInfo) RoundRobin() map[string][]int32 {
-	sort.Strings(info.MemberIDs)
-
-	mlen := len(info.MemberIDs)
-	res := make(map[string][]int32, mlen)
-	for i, pnum := range info.Partitions {
-		memberID := info.MemberIDs[i%mlen]
-		res[memberID] = append(res[memberID], pnum)
-	}
-	return res
-}
-
-// --------------------------------------------------------------------
-
-type balancer struct {
-	client sarama.Client
-	topics map[string]topicInfo
-}
-
-func newBalancerFromMeta(client sarama.Client, members map[string]sarama.ConsumerGroupMemberMetadata) (*balancer, error) {
-	balancer := newBalancer(client)
-	for memberID, meta := range members {
-		for _, topic := range meta.Topics {
-			if err := balancer.Topic(topic, memberID); err != nil {
-				return nil, err
-			}
-		}
-	}
-	return balancer, nil
-}
-
-func newBalancer(client sarama.Client) *balancer {
-	return &balancer{
-		client: client,
-		topics: make(map[string]topicInfo),
-	}
-}
-
-func (r *balancer) Topic(name string, memberID string) error {
-	topic, ok := r.topics[name]
-	if !ok {
-		nums, err := r.client.Partitions(name)
-		if err != nil {
-			return err
-		}
-		topic = topicInfo{
-			Partitions: nums,
-			MemberIDs:  make([]string, 0, 1),
-		}
-	}
-	topic.MemberIDs = append(topic.MemberIDs, memberID)
-	r.topics[name] = topic
-	return nil
-}
-
-func (r *balancer) Perform(s Strategy) map[string]map[string][]int32 {
-	res := make(map[string]map[string][]int32, 1)
-	for topic, info := range r.topics {
-		for memberID, partitions := range info.Perform(s) {
-			if _, ok := res[memberID]; !ok {
-				res[memberID] = make(map[string][]int32, 1)
-			}
-			res[memberID][topic] = partitions
-		}
-	}
-	return res
-}
diff --git a/vendor/github.com/bsm/sarama-cluster/client.go b/vendor/github.com/bsm/sarama-cluster/client.go
deleted file mode 100644
index 42ffb30..0000000
--- a/vendor/github.com/bsm/sarama-cluster/client.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package cluster
-
-import (
-	"errors"
-	"sync/atomic"
-
-	"github.com/Shopify/sarama"
-)
-
-var errClientInUse = errors.New("cluster: client is already used by another consumer")
-
-// Client is a group client
-type Client struct {
-	sarama.Client
-	config Config
-
-	inUse uint32
-}
-
-// NewClient creates a new client instance
-func NewClient(addrs []string, config *Config) (*Client, error) {
-	if config == nil {
-		config = NewConfig()
-	}
-
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	client, err := sarama.NewClient(addrs, &config.Config)
-	if err != nil {
-		return nil, err
-	}
-
-	return &Client{Client: client, config: *config}, nil
-}
-
-// ClusterConfig returns the cluster configuration.
-func (c *Client) ClusterConfig() *Config {
-	cfg := c.config
-	return &cfg
-}
-
-func (c *Client) claim() bool {
-	return atomic.CompareAndSwapUint32(&c.inUse, 0, 1)
-}
-
-func (c *Client) release() {
-	atomic.CompareAndSwapUint32(&c.inUse, 1, 0)
-}
diff --git a/vendor/github.com/bsm/sarama-cluster/cluster.go b/vendor/github.com/bsm/sarama-cluster/cluster.go
deleted file mode 100644
index adcf0e9..0000000
--- a/vendor/github.com/bsm/sarama-cluster/cluster.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package cluster
-
-// Strategy for partition to consumer assignment
-type Strategy string
-
-const (
-	// StrategyRange is the default and assigns partition ranges to consumers.
-	// Example with six partitions and two consumers:
-	//   C1: [0, 1, 2]
-	//   C2: [3, 4, 5]
-	StrategyRange Strategy = "range"
-
-	// StrategyRoundRobin assigns partitions by alternating over consumers.
-	// Example with six partitions and two consumers:
-	//   C1: [0, 2, 4]
-	//   C2: [1, 3, 5]
-	StrategyRoundRobin Strategy = "roundrobin"
-)
-
-// Error instances are wrappers for internal errors with a context and
-// may be returned through the consumer's Errors() channel
-type Error struct {
-	Ctx string
-	error
-}
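To make the two strategies above concrete, here is a small, self-contained sketch that mirrors the removed balancer's arithmetic (the helper functions are illustrative, not part of the library's API) and reproduces the distributions shown in the comments:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// assignRange mirrors the range strategy: each consumer receives a contiguous
// slice of the partition list.
func assignRange(partitions []int32, members []string) map[string][]int32 {
	sort.Strings(members)
	res := make(map[string][]int32, len(members))
	n := float64(len(partitions)) / float64(len(members))
	for pos, m := range members {
		lo := int(math.Floor(float64(pos)*n + 0.5))
		hi := int(math.Floor(float64(pos+1)*n + 0.5))
		if sub := partitions[lo:hi]; len(sub) > 0 {
			res[m] = sub
		}
	}
	return res
}

// assignRoundRobin mirrors the round-robin strategy: partitions alternate
// across consumers.
func assignRoundRobin(partitions []int32, members []string) map[string][]int32 {
	sort.Strings(members)
	res := make(map[string][]int32, len(members))
	for i, p := range partitions {
		m := members[i%len(members)]
		res[m] = append(res[m], p)
	}
	return res
}

func main() {
	parts := []int32{0, 1, 2, 3, 4, 5}
	members := []string{"C1", "C2"}
	fmt.Println("range:     ", assignRange(parts, members))      // C1:[0 1 2] C2:[3 4 5]
	fmt.Println("roundrobin:", assignRoundRobin(parts, members)) // C1:[0 2 4] C2:[1 3 5]
}
```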
diff --git a/vendor/github.com/bsm/sarama-cluster/config.go b/vendor/github.com/bsm/sarama-cluster/config.go
deleted file mode 100644
index 084b835..0000000
--- a/vendor/github.com/bsm/sarama-cluster/config.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package cluster
-
-import (
-	"regexp"
-	"time"
-
-	"github.com/Shopify/sarama"
-)
-
-var minVersion = sarama.V0_9_0_0
-
-type ConsumerMode uint8
-
-const (
-	ConsumerModeMultiplex ConsumerMode = iota
-	ConsumerModePartitions
-)
-
-// Config extends sarama.Config with Group specific namespace
-type Config struct {
-	sarama.Config
-
-	// Group is the namespace for group management properties
-	Group struct {
-
-		// The strategy to use for the allocation of partitions to consumers (defaults to StrategyRange)
-		PartitionStrategy Strategy
-
-		// By default, messages and errors from the subscribed topics and partitions are all multiplexed and
-		// made available through the consumer's Messages() and Errors() channels.
-		//
-		// Users who require low-level access can enable ConsumerModePartitions where individual partitions
-		// are exposed on the Partitions() channel. Messages and errors must then be consumed on the partitions
-		// themselves.
-		Mode ConsumerMode
-
-		Offsets struct {
-			Retry struct {
-				// The number of retries when committing offsets (defaults to 3).
-				Max int
-			}
-			Synchronization struct {
-				// The duration allowed for other clients to commit their offsets before resumption in this client, e.g. during a rebalance
-				// NewConfig sets this to the Consumer.MaxProcessingTime duration of the Sarama configuration
-				DwellTime time.Duration
-			}
-		}
-
-		Session struct {
-			// The allowed session timeout for registered consumers (defaults to 30s).
-			// Must be within the allowed server range.
-			Timeout time.Duration
-		}
-
-		Heartbeat struct {
-			// Interval between each heartbeat (defaults to 3s). It should be no more
-			// than 1/3rd of the Group.Session.Timeout setting
-			Interval time.Duration
-		}
-
-		// Return specifies which group channels will be populated. If they are set to true,
-		// you must read from the respective channels to prevent deadlock.
-		Return struct {
-			// If enabled, rebalance notification will be returned on the
-			// Notifications channel (default disabled).
-			Notifications bool
-		}
-
-		Topics struct {
-			// An additional whitelist of topics to subscribe to.
-			Whitelist *regexp.Regexp
-			// An additional blacklist of topics to avoid. If set, this will take precedence
-			// over the Whitelist setting.
-			Blacklist *regexp.Regexp
-		}
-
-		Member struct {
-			// Custom metadata to include when joining the group. The user data for all joined members
-			// can be retrieved by sending a DescribeGroupRequest to the broker that is the
-			// coordinator for the group.
-			UserData []byte
-		}
-	}
-}
-
-// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config {
-	c := &Config{
-		Config: *sarama.NewConfig(),
-	}
-	c.Group.PartitionStrategy = StrategyRange
-	c.Group.Offsets.Retry.Max = 3
-	c.Group.Offsets.Synchronization.DwellTime = c.Consumer.MaxProcessingTime
-	c.Group.Session.Timeout = 30 * time.Second
-	c.Group.Heartbeat.Interval = 3 * time.Second
-	c.Config.Version = minVersion
-	return c
-}
-
-// Validate checks a Config instance. It will return a
-// sarama.ConfigurationError if the specified values don't make sense.
-func (c *Config) Validate() error {
-	if c.Group.Heartbeat.Interval%time.Millisecond != 0 {
-		sarama.Logger.Println("Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Group.Session.Timeout%time.Millisecond != 0 {
-		sarama.Logger.Println("Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Group.PartitionStrategy != StrategyRange && c.Group.PartitionStrategy != StrategyRoundRobin {
-		sarama.Logger.Println("Group.PartitionStrategy is not supported; range will be assumed.")
-	}
-	if !c.Version.IsAtLeast(minVersion) {
-		sarama.Logger.Println("Version is not supported; 0.9. will be assumed.")
-		c.Version = minVersion
-	}
-	if err := c.Config.Validate(); err != nil {
-		return err
-	}
-
-	// validate the Group values
-	switch {
-	case c.Group.Offsets.Retry.Max < 0:
-		return sarama.ConfigurationError("Group.Offsets.Retry.Max must be >= 0")
-	case c.Group.Offsets.Synchronization.DwellTime <= 0:
-		return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be > 0")
-	case c.Group.Offsets.Synchronization.DwellTime > 10*time.Minute:
-		return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be <= 10m")
-	case c.Group.Heartbeat.Interval <= 0:
-		return sarama.ConfigurationError("Group.Heartbeat.Interval must be > 0")
-	case c.Group.Session.Timeout <= 0:
-		return sarama.ConfigurationError("Group.Session.Timeout must be > 0")
-	case !c.Metadata.Full && c.Group.Topics.Whitelist != nil:
-		return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Whitelist is used")
-	case !c.Metadata.Full && c.Group.Topics.Blacklist != nil:
-		return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Blacklist is used")
-	}
-
-	// ensure offset is correct
-	switch c.Consumer.Offsets.Initial {
-	case sarama.OffsetOldest, sarama.OffsetNewest:
-	default:
-		return sarama.ConfigurationError("Consumer.Offsets.Initial must be either OffsetOldest or OffsetNewest")
-	}
-
-	return nil
-}
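A brief sketch of how this configuration was typically assembled and validated before building a consumer (the values here are arbitrary examples, not recommendations):

```go
package main

import (
	"fmt"
	"regexp"
	"time"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig() // defaults: range strategy, 30s session, 3s heartbeat

	// Tighten group-level settings; Validate enforces the bounds documented above.
	config.Group.PartitionStrategy = cluster.StrategyRoundRobin
	config.Group.Session.Timeout = 20 * time.Second
	config.Group.Heartbeat.Interval = 5 * time.Second // keep this <= 1/3 of the session timeout

	// Whitelisting extra topics requires full metadata.
	config.Metadata.Full = true
	config.Group.Topics.Whitelist = regexp.MustCompile(`^logs\..+`)

	if err := config.Validate(); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	fmt.Println("config OK")
}
```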
diff --git a/vendor/github.com/bsm/sarama-cluster/consumer.go b/vendor/github.com/bsm/sarama-cluster/consumer.go
deleted file mode 100644
index e7a67da..0000000
--- a/vendor/github.com/bsm/sarama-cluster/consumer.go
+++ /dev/null
@@ -1,919 +0,0 @@
-package cluster
-
-import (
-	"sort"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/Shopify/sarama"
-)
-
-// Consumer is a cluster group consumer
-type Consumer struct {
-	client    *Client
-	ownClient bool
-
-	consumer sarama.Consumer
-	subs     *partitionMap
-
-	consumerID string
-	groupID    string
-
-	memberID     string
-	generationID int32
-	membershipMu sync.RWMutex
-
-	coreTopics  []string
-	extraTopics []string
-
-	dying, dead chan none
-	closeOnce   sync.Once
-
-	consuming     int32
-	messages      chan *sarama.ConsumerMessage
-	errors        chan error
-	partitions    chan PartitionConsumer
-	notifications chan *Notification
-
-	commitMu sync.Mutex
-}
-
-// NewConsumer initializes a new consumer
-func NewConsumer(addrs []string, groupID string, topics []string, config *Config) (*Consumer, error) {
-	client, err := NewClient(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-
-	consumer, err := NewConsumerFromClient(client, groupID, topics)
-	if err != nil {
-		return nil, err
-	}
-	consumer.ownClient = true
-	return consumer, nil
-}
-
-// NewConsumerFromClient initializes a new consumer from an existing client.
-//
-// Please note that clients cannot be shared between consumers (due to Kafka internals);
-// they can only be re-used, which requires the user to call Close() on the first consumer
-// before using this method again to initialize another one. Attempts to use a client with
-// more than one consumer at a time will return errors.
-func NewConsumerFromClient(client *Client, groupID string, topics []string) (*Consumer, error) {
-	if !client.claim() {
-		return nil, errClientInUse
-	}
-
-	consumer, err := sarama.NewConsumerFromClient(client.Client)
-	if err != nil {
-		client.release()
-		return nil, err
-	}
-
-	sort.Strings(topics)
-	c := &Consumer{
-		client:   client,
-		consumer: consumer,
-		subs:     newPartitionMap(),
-		groupID:  groupID,
-
-		coreTopics: topics,
-
-		dying: make(chan none),
-		dead:  make(chan none),
-
-		messages:      make(chan *sarama.ConsumerMessage),
-		errors:        make(chan error, client.config.ChannelBufferSize),
-		partitions:    make(chan PartitionConsumer, 1),
-		notifications: make(chan *Notification),
-	}
-	if err := c.client.RefreshCoordinator(groupID); err != nil {
-		client.release()
-		return nil, err
-	}
-
-	go c.mainLoop()
-	return c, nil
-}
-
-// Messages returns the read channel for the messages that are returned by
-// the broker.
-//
-// This channel will only return if the Config.Group.Mode option is set to
-// ConsumerModeMultiplex (default).
-func (c *Consumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages }
-
-// Partitions returns the read channels for individual partitions of this broker.
-//
-// This channel will only return if the Config.Group.Mode option is set to
-// ConsumerModePartitions.
-//
-// The Partitions() channel must be listened to for the life of this consumer;
-// when a rebalance happens old partitions will be closed (naturally come to
-// completion) and new ones will be emitted. The returned channel will only close
-// when the consumer is completely shut down.
-func (c *Consumer) Partitions() <-chan PartitionConsumer { return c.partitions }
-
-// Errors returns a read channel of errors that occur during offset management, if
-// enabled. By default, errors are logged and not returned over this channel. If
-// you want to implement any custom error handling, set your config's
-// Consumer.Return.Errors setting to true, and read from this channel.
-func (c *Consumer) Errors() <-chan error { return c.errors }
-
-// Notifications returns a channel of Notifications that occur during consumer
-// rebalancing. Notifications will only be emitted over this channel if your config's
-// Group.Return.Notifications setting is set to true.
-func (c *Consumer) Notifications() <-chan *Notification { return c.notifications }
-
-// HighWaterMarks returns the current high water marks for each topic and partition
-// Consistency between partitions is not guaranteed since high water marks are updated separately.
-func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { return c.consumer.HighWaterMarks() }
-
-// MarkOffset marks the provided message as processed, alongside a metadata string
-// that represents the state of the partition consumer at that point in time. The
-// metadata string can be used by another consumer to restore that state, so it
-// can resume consumption.
-//
-// Note: calling MarkOffset does not necessarily commit the offset to the backend
-// store immediately for efficiency reasons, and it may never be committed if
-// your application crashes. This means that you may end up processing the same
-// message twice, and your processing should ideally be idempotent.
-func (c *Consumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
-	if sub := c.subs.Fetch(msg.Topic, msg.Partition); sub != nil {
-		sub.MarkOffset(msg.Offset, metadata)
-	}
-}
-
-// MarkPartitionOffset marks an offset of the provided topic/partition as processed.
-// See MarkOffset for additional explanation.
-func (c *Consumer) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
-	if sub := c.subs.Fetch(topic, partition); sub != nil {
-		sub.MarkOffset(offset, metadata)
-	}
-}
-
-// MarkOffsets marks stashed offsets as processed.
-// See MarkOffset for additional explanation.
-func (c *Consumer) MarkOffsets(s *OffsetStash) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	for tp, info := range s.offsets {
-		if sub := c.subs.Fetch(tp.Topic, tp.Partition); sub != nil {
-			sub.MarkOffset(info.Offset, info.Metadata)
-		}
-		delete(s.offsets, tp)
-	}
-}
-
-// ResetOffset marks the provided message as processed, alongside a metadata string
-// that represents the state of the partition consumer at that point in time. The
-// metadata string can be used by another consumer to restore that state, so it
-// can resume consumption.
-//
-// The difference between ResetOffset and MarkOffset is that ResetOffset allows rewinding to an earlier offset
-func (c *Consumer) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
-	if sub := c.subs.Fetch(msg.Topic, msg.Partition); sub != nil {
-		sub.ResetOffset(msg.Offset, metadata)
-	}
-}
-
-// ResetPartitionOffset marks an offset of the provided topic/partition as processed.
-// See ResetOffset for additional explanation.
-func (c *Consumer) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
-	sub := c.subs.Fetch(topic, partition)
-	if sub != nil {
-		sub.ResetOffset(offset, metadata)
-	}
-}
-
-// ResetOffsets marks stashed offsets as processed.
-// See ResetOffset for additional explanation.
-func (c *Consumer) ResetOffsets(s *OffsetStash) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	for tp, info := range s.offsets {
-		if sub := c.subs.Fetch(tp.Topic, tp.Partition); sub != nil {
-			sub.ResetOffset(info.Offset, info.Metadata)
-		}
-		delete(s.offsets, tp)
-	}
-}
-
-// Subscriptions returns the consumed topics and partitions
-func (c *Consumer) Subscriptions() map[string][]int32 {
-	return c.subs.Info()
-}
-
-// CommitOffsets allows manually committing previously marked offsets. By default there is no
-// need to call this function as the consumer will commit offsets automatically
-// using the Config.Consumer.Offsets.CommitInterval setting.
-//
-// Please be aware that calling this function during an internal rebalance cycle may return
-// broker errors (e.g. sarama.ErrUnknownMemberId or sarama.ErrIllegalGeneration).
-func (c *Consumer) CommitOffsets() error {
-	c.commitMu.Lock()
-	defer c.commitMu.Unlock()
-
-	memberID, generationID := c.membership()
-	req := &sarama.OffsetCommitRequest{
-		Version:                 2,
-		ConsumerGroup:           c.groupID,
-		ConsumerGroupGeneration: generationID,
-		ConsumerID:              memberID,
-		RetentionTime:           -1,
-	}
-
-	if ns := c.client.config.Consumer.Offsets.Retention; ns != 0 {
-		req.RetentionTime = int64(ns / time.Millisecond)
-	}
-
-	snap := c.subs.Snapshot()
-	dirty := false
-	for tp, state := range snap {
-		if state.Dirty {
-			dirty = true
-			req.AddBlock(tp.Topic, tp.Partition, state.Info.Offset, 0, state.Info.Metadata)
-		}
-	}
-	if !dirty {
-		return nil
-	}
-
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-
-	resp, err := broker.CommitOffset(req)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-
-	for topic, errs := range resp.Errors {
-		for partition, kerr := range errs {
-			if kerr != sarama.ErrNoError {
-				err = kerr
-			} else if state, ok := snap[topicPartition{topic, partition}]; ok {
-				if sub := c.subs.Fetch(topic, partition); sub != nil {
-					sub.markCommitted(state.Info.Offset)
-				}
-			}
-		}
-	}
-	return err
-}
-
-// Close safely closes the consumer and releases all resources
-func (c *Consumer) Close() (err error) {
-	c.closeOnce.Do(func() {
-		close(c.dying)
-		<-c.dead
-
-		if e := c.release(); e != nil {
-			err = e
-		}
-		if e := c.consumer.Close(); e != nil {
-			err = e
-		}
-		close(c.messages)
-		close(c.errors)
-
-		if e := c.leaveGroup(); e != nil {
-			err = e
-		}
-		close(c.partitions)
-		close(c.notifications)
-
-		// drain
-		for range c.messages {
-		}
-		for range c.errors {
-		}
-		for p := range c.partitions {
-			_ = p.Close()
-		}
-		for range c.notifications {
-		}
-
-		c.client.release()
-		if c.ownClient {
-			if e := c.client.Close(); e != nil {
-				err = e
-			}
-		}
-	})
-	return
-}
-
-func (c *Consumer) mainLoop() {
-	defer close(c.dead)
-	defer atomic.StoreInt32(&c.consuming, 0)
-
-	for {
-		atomic.StoreInt32(&c.consuming, 0)
-
-		// Check if close was requested
-		select {
-		case <-c.dying:
-			return
-		default:
-		}
-
-		// Start next consume cycle
-		c.nextTick()
-	}
-}
-
-func (c *Consumer) nextTick() {
-	// Remember previous subscriptions
-	var notification *Notification
-	if c.client.config.Group.Return.Notifications {
-		notification = newNotification(c.subs.Info())
-	}
-
-	// Refresh coordinator
-	if err := c.refreshCoordinator(); err != nil {
-		c.rebalanceError(err, nil)
-		return
-	}
-
-	// Release subscriptions
-	if err := c.release(); err != nil {
-		c.rebalanceError(err, nil)
-		return
-	}
-
-	// Issue rebalance start notification
-	if c.client.config.Group.Return.Notifications {
-		c.handleNotification(notification)
-	}
-
-	// Rebalance, fetch new subscriptions
-	subs, err := c.rebalance()
-	if err != nil {
-		c.rebalanceError(err, notification)
-		return
-	}
-
-	// Coordinate loops, make sure everything is
-	// stopped on exit
-	tomb := newLoopTomb()
-	defer tomb.Close()
-
-	// Start the heartbeat
-	tomb.Go(c.hbLoop)
-
-	// Subscribe to topic/partitions
-	if err := c.subscribe(tomb, subs); err != nil {
-		c.rebalanceError(err, notification)
-		return
-	}
-
-	// Update/issue notification with new claims
-	if c.client.config.Group.Return.Notifications {
-		notification = notification.success(subs)
-		c.handleNotification(notification)
-	}
-
-	// Start topic watcher loop
-	tomb.Go(c.twLoop)
-
-	// Start consuming and committing offsets
-	tomb.Go(c.cmLoop)
-	atomic.StoreInt32(&c.consuming, 1)
-
-	// Wait for signals
-	select {
-	case <-tomb.Dying():
-	case <-c.dying:
-	}
-}
-
-// heartbeat loop, triggered by the mainLoop
-func (c *Consumer) hbLoop(stopped <-chan none) {
-	ticker := time.NewTicker(c.client.config.Group.Heartbeat.Interval)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			switch err := c.heartbeat(); err {
-			case nil, sarama.ErrNoError:
-			case sarama.ErrNotCoordinatorForConsumer, sarama.ErrRebalanceInProgress:
-				return
-			default:
-				c.handleError(&Error{Ctx: "heartbeat", error: err})
-				return
-			}
-		case <-stopped:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-// topic watcher loop, triggered by the mainLoop
-func (c *Consumer) twLoop(stopped <-chan none) {
-	ticker := time.NewTicker(c.client.config.Metadata.RefreshFrequency / 2)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			topics, err := c.client.Topics()
-			if err != nil {
-				c.handleError(&Error{Ctx: "topics", error: err})
-				return
-			}
-
-			for _, topic := range topics {
-				if !c.isKnownCoreTopic(topic) &&
-					!c.isKnownExtraTopic(topic) &&
-					c.isPotentialExtraTopic(topic) {
-					return
-				}
-			}
-		case <-stopped:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-// commit loop, triggered by the mainLoop
-func (c *Consumer) cmLoop(stopped <-chan none) {
-	ticker := time.NewTicker(c.client.config.Consumer.Offsets.CommitInterval)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			if err := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); err != nil {
-				c.handleError(&Error{Ctx: "commit", error: err})
-				return
-			}
-		case <-stopped:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-func (c *Consumer) rebalanceError(err error, n *Notification) {
-	if n != nil {
-		n.Type = RebalanceError
-		c.handleNotification(n)
-	}
-
-	switch err {
-	case sarama.ErrRebalanceInProgress:
-	default:
-		c.handleError(&Error{Ctx: "rebalance", error: err})
-	}
-
-	select {
-	case <-c.dying:
-	case <-time.After(c.client.config.Metadata.Retry.Backoff):
-	}
-}
-
-func (c *Consumer) handleNotification(n *Notification) {
-	if c.client.config.Group.Return.Notifications {
-		select {
-		case c.notifications <- n:
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-func (c *Consumer) handleError(e *Error) {
-	if c.client.config.Consumer.Return.Errors {
-		select {
-		case c.errors <- e:
-		case <-c.dying:
-			return
-		}
-	} else {
-		sarama.Logger.Printf("%s error: %s\n", e.Ctx, e.Error())
-	}
-}
-
-// Releases the consumer and commits offsets, called from rebalance() and Close()
-func (c *Consumer) release() (err error) {
-	// Stop all consumers
-	c.subs.Stop()
-
-	// Clear subscriptions on exit
-	defer c.subs.Clear()
-
-	// Wait for messages to be processed
-	timeout := time.NewTimer(c.client.config.Group.Offsets.Synchronization.DwellTime)
-	defer timeout.Stop()
-
-	select {
-	case <-c.dying:
-	case <-timeout.C:
-	}
-
-	// Commit offsets, continue on errors
-	if e := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); e != nil {
-		err = e
-	}
-
-	return
-}
-
-// --------------------------------------------------------------------
-
-// Performs a heartbeat, part of the mainLoop()
-func (c *Consumer) heartbeat() error {
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-
-	memberID, generationID := c.membership()
-	resp, err := broker.Heartbeat(&sarama.HeartbeatRequest{
-		GroupId:      c.groupID,
-		MemberId:     memberID,
-		GenerationId: generationID,
-	})
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-	return resp.Err
-}
-
-// Performs a rebalance, part of the mainLoop()
-func (c *Consumer) rebalance() (map[string][]int32, error) {
-	memberID, _ := c.membership()
-	sarama.Logger.Printf("cluster/consumer %s rebalance\n", memberID)
-
-	allTopics, err := c.client.Topics()
-	if err != nil {
-		return nil, err
-	}
-	c.extraTopics = c.selectExtraTopics(allTopics)
-	sort.Strings(c.extraTopics)
-
-	// Re-join consumer group
-	strategy, err := c.joinGroup()
-	switch {
-	case err == sarama.ErrUnknownMemberId:
-		c.membershipMu.Lock()
-		c.memberID = ""
-		c.membershipMu.Unlock()
-		return nil, err
-	case err != nil:
-		return nil, err
-	}
-
-	// Sync consumer group state, fetch subscriptions
-	subs, err := c.syncGroup(strategy)
-	switch {
-	case err == sarama.ErrRebalanceInProgress:
-		return nil, err
-	case err != nil:
-		_ = c.leaveGroup()
-		return nil, err
-	}
-	return subs, nil
-}
-
-// Performs the subscription, part of the mainLoop()
-func (c *Consumer) subscribe(tomb *loopTomb, subs map[string][]int32) error {
-	// fetch offsets
-	offsets, err := c.fetchOffsets(subs)
-	if err != nil {
-		_ = c.leaveGroup()
-		return err
-	}
-
-	// create consumers in parallel
-	var mu sync.Mutex
-	var wg sync.WaitGroup
-
-	for topic, partitions := range subs {
-		for _, partition := range partitions {
-			wg.Add(1)
-
-			info := offsets[topic][partition]
-			go func(topic string, partition int32) {
-				if e := c.createConsumer(tomb, topic, partition, info); e != nil {
-					mu.Lock()
-					err = e
-					mu.Unlock()
-				}
-				wg.Done()
-			}(topic, partition)
-		}
-	}
-	wg.Wait()
-
-	if err != nil {
-		_ = c.release()
-		_ = c.leaveGroup()
-	}
-	return err
-}
-
-// --------------------------------------------------------------------
-
-// Send a request to the broker to join group on rebalance()
-func (c *Consumer) joinGroup() (*balancer, error) {
-	memberID, _ := c.membership()
-	req := &sarama.JoinGroupRequest{
-		GroupId:        c.groupID,
-		MemberId:       memberID,
-		SessionTimeout: int32(c.client.config.Group.Session.Timeout / time.Millisecond),
-		ProtocolType:   "consumer",
-	}
-
-	meta := &sarama.ConsumerGroupMemberMetadata{
-		Version:  1,
-		Topics:   append(c.coreTopics, c.extraTopics...),
-		UserData: c.client.config.Group.Member.UserData,
-	}
-	err := req.AddGroupProtocolMetadata(string(StrategyRange), meta)
-	if err != nil {
-		return nil, err
-	}
-	err = req.AddGroupProtocolMetadata(string(StrategyRoundRobin), meta)
-	if err != nil {
-		return nil, err
-	}
-
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	}
-
-	resp, err := broker.JoinGroup(req)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	} else if resp.Err != sarama.ErrNoError {
-		c.closeCoordinator(broker, resp.Err)
-		return nil, resp.Err
-	}
-
-	var strategy *balancer
-	if resp.LeaderId == resp.MemberId {
-		members, err := resp.GetMembers()
-		if err != nil {
-			return nil, err
-		}
-
-		strategy, err = newBalancerFromMeta(c.client, members)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	c.membershipMu.Lock()
-	c.memberID = resp.MemberId
-	c.generationID = resp.GenerationId
-	c.membershipMu.Unlock()
-
-	return strategy, nil
-}
-
-// Send a request to the broker to sync the group on rebalance().
-// Returns a list of topics and partitions to consume.
-func (c *Consumer) syncGroup(strategy *balancer) (map[string][]int32, error) {
-	memberID, generationID := c.membership()
-	req := &sarama.SyncGroupRequest{
-		GroupId:      c.groupID,
-		MemberId:     memberID,
-		GenerationId: generationID,
-	}
-
-	if strategy != nil {
-		for memberID, topics := range strategy.Perform(c.client.config.Group.PartitionStrategy) {
-			if err := req.AddGroupAssignmentMember(memberID, &sarama.ConsumerGroupMemberAssignment{
-				Topics: topics,
-			}); err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	}
-
-	resp, err := broker.SyncGroup(req)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	} else if resp.Err != sarama.ErrNoError {
-		c.closeCoordinator(broker, resp.Err)
-		return nil, resp.Err
-	}
-
-	// Return if there is nothing to subscribe to
-	if len(resp.MemberAssignment) == 0 {
-		return nil, nil
-	}
-
-	// Get assigned subscriptions
-	members, err := resp.GetMemberAssignment()
-	if err != nil {
-		return nil, err
-	}
-
-	// Sort partitions, for each topic
-	for topic := range members.Topics {
-		sort.Sort(int32Slice(members.Topics[topic]))
-	}
-	return members.Topics, nil
-}
-
-// Fetches latest committed offsets for all subscriptions
-func (c *Consumer) fetchOffsets(subs map[string][]int32) (map[string]map[int32]offsetInfo, error) {
-	offsets := make(map[string]map[int32]offsetInfo, len(subs))
-	req := &sarama.OffsetFetchRequest{
-		Version:       1,
-		ConsumerGroup: c.groupID,
-	}
-
-	for topic, partitions := range subs {
-		offsets[topic] = make(map[int32]offsetInfo, len(partitions))
-		for _, partition := range partitions {
-			offsets[topic][partition] = offsetInfo{Offset: -1}
-			req.AddPartition(topic, partition)
-		}
-	}
-
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	}
-
-	resp, err := broker.FetchOffset(req)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	}
-
-	for topic, partitions := range subs {
-		for _, partition := range partitions {
-			block := resp.GetBlock(topic, partition)
-			if block == nil {
-				return nil, sarama.ErrIncompleteResponse
-			}
-
-			if block.Err == sarama.ErrNoError {
-				offsets[topic][partition] = offsetInfo{Offset: block.Offset, Metadata: block.Metadata}
-			} else {
-				return nil, block.Err
-			}
-		}
-	}
-	return offsets, nil
-}
-
-// Send a request to the broker to leave the group on a failed rebalance() and on Close()
-func (c *Consumer) leaveGroup() error {
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-
-	memberID, _ := c.membership()
-	if _, err = broker.LeaveGroup(&sarama.LeaveGroupRequest{
-		GroupId:  c.groupID,
-		MemberId: memberID,
-	}); err != nil {
-		c.closeCoordinator(broker, err)
-	}
-	return err
-}
-
-// --------------------------------------------------------------------
-
-func (c *Consumer) createConsumer(tomb *loopTomb, topic string, partition int32, info offsetInfo) error {
-	memberID, _ := c.membership()
-	sarama.Logger.Printf("cluster/consumer %s consume %s/%d from %d\n", memberID, topic, partition, info.NextOffset(c.client.config.Consumer.Offsets.Initial))
-
-	// Create partitionConsumer
-	pc, err := newPartitionConsumer(c.consumer, topic, partition, info, c.client.config.Consumer.Offsets.Initial)
-	if err != nil {
-		return err
-	}
-
-	// Store in subscriptions
-	c.subs.Store(topic, partition, pc)
-
-	// Start partition consumer goroutine
-	tomb.Go(func(stopper <-chan none) {
-		if c.client.config.Group.Mode == ConsumerModePartitions {
-			pc.waitFor(stopper, c.errors)
-		} else {
-			pc.multiplex(stopper, c.messages, c.errors)
-		}
-	})
-
-	if c.client.config.Group.Mode == ConsumerModePartitions {
-		c.partitions <- pc
-	}
-	return nil
-}
-
-func (c *Consumer) commitOffsetsWithRetry(retries int) error {
-	err := c.CommitOffsets()
-	if err != nil && retries > 0 {
-		return c.commitOffsetsWithRetry(retries - 1)
-	}
-	return err
-}
-
-func (c *Consumer) closeCoordinator(broker *sarama.Broker, err error) {
-	if broker != nil {
-		_ = broker.Close()
-	}
-
-	switch err {
-	case sarama.ErrConsumerCoordinatorNotAvailable, sarama.ErrNotCoordinatorForConsumer:
-		_ = c.client.RefreshCoordinator(c.groupID)
-	}
-}
-
-func (c *Consumer) selectExtraTopics(allTopics []string) []string {
-	extra := allTopics[:0]
-	for _, topic := range allTopics {
-		if !c.isKnownCoreTopic(topic) && c.isPotentialExtraTopic(topic) {
-			extra = append(extra, topic)
-		}
-	}
-	return extra
-}
-
-func (c *Consumer) isKnownCoreTopic(topic string) bool {
-	pos := sort.SearchStrings(c.coreTopics, topic)
-	return pos < len(c.coreTopics) && c.coreTopics[pos] == topic
-}
-
-func (c *Consumer) isKnownExtraTopic(topic string) bool {
-	pos := sort.SearchStrings(c.extraTopics, topic)
-	return pos < len(c.extraTopics) && c.extraTopics[pos] == topic
-}
-
-func (c *Consumer) isPotentialExtraTopic(topic string) bool {
-	rx := c.client.config.Group.Topics
-	if rx.Blacklist != nil && rx.Blacklist.MatchString(topic) {
-		return false
-	}
-	if rx.Whitelist != nil && rx.Whitelist.MatchString(topic) {
-		return true
-	}
-	return false
-}
-
-func (c *Consumer) refreshCoordinator() error {
-	if err := c.refreshMetadata(); err != nil {
-		return err
-	}
-	return c.client.RefreshCoordinator(c.groupID)
-}
-
-func (c *Consumer) refreshMetadata() (err error) {
-	if c.client.config.Metadata.Full {
-		err = c.client.RefreshMetadata()
-	} else {
-		var topics []string
-		if topics, err = c.client.Topics(); err == nil && len(topics) != 0 {
-			err = c.client.RefreshMetadata(topics...)
-		}
-	}
-
-	// maybe we didn't have authorization to describe all topics
-	switch err {
-	case sarama.ErrTopicAuthorizationFailed:
-		err = c.client.RefreshMetadata(c.coreTopics...)
-	}
-	return
-}
-
-func (c *Consumer) membership() (memberID string, generationID int32) {
-	c.membershipMu.RLock()
-	memberID, generationID = c.memberID, c.generationID
-	c.membershipMu.RUnlock()
-	return
-}
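To illustrate the offset semantics documented in this file (MarkOffset only stages an offset in memory; it reaches the group coordinator via the periodic commit loop or an explicit CommitOffsets call), a minimal hypothetical sketch:

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-group", []string{"my_topic"}, cluster.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	n := 0
	for msg := range consumer.Messages() {
		// Process msg here; processing should be idempotent, since a message
		// may be redelivered if the process crashes before the next commit.
		consumer.MarkOffset(msg, "")

		// The consumer already commits on Consumer.Offsets.CommitInterval;
		// this additionally forces a commit every 100 messages.
		if n++; n%100 == 0 {
			if err := consumer.CommitOffsets(); err != nil {
				log.Println("commit failed:", err)
			}
		}
	}
}
```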
diff --git a/vendor/github.com/bsm/sarama-cluster/doc.go b/vendor/github.com/bsm/sarama-cluster/doc.go
deleted file mode 100644
index 9c8ff16..0000000
--- a/vendor/github.com/bsm/sarama-cluster/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
-Package cluster provides cluster extensions for Sarama, enabling users
-to consume topics across multiple, balanced nodes.
-
-It requires Kafka v0.9+ and follows the steps guide, described in:
-https://cwiki.apache.org/confluence/display/KAFKA/Kafka+0.9+Consumer+Rewrite+Design
-*/
-package cluster
diff --git a/vendor/github.com/bsm/sarama-cluster/offsets.go b/vendor/github.com/bsm/sarama-cluster/offsets.go
deleted file mode 100644
index 4223ac5..0000000
--- a/vendor/github.com/bsm/sarama-cluster/offsets.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package cluster
-
-import (
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-// OffsetStash allows offsets to be accumulated and
-// marked as processed in bulk
-type OffsetStash struct {
-	offsets map[topicPartition]offsetInfo
-	mu      sync.Mutex
-}
-
-// NewOffsetStash inits a blank stash
-func NewOffsetStash() *OffsetStash {
-	return &OffsetStash{offsets: make(map[topicPartition]offsetInfo)}
-}
-
-// MarkOffset stashes the provided message offset
-func (s *OffsetStash) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
-	s.MarkPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
-}
-
-// MarkPartitionOffset stashes the offset for the provided topic/partition combination
-func (s *OffsetStash) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	key := topicPartition{Topic: topic, Partition: partition}
-	if info := s.offsets[key]; offset >= info.Offset {
-		info.Offset = offset
-		info.Metadata = metadata
-		s.offsets[key] = info
-	}
-}
-
-// ResetPartitionOffset stashes the offset for the provided topic/partition combination.
-// The difference between ResetPartitionOffset and MarkPartitionOffset is that ResetPartitionOffset supports earlier offsets
-func (s *OffsetStash) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	key := topicPartition{Topic: topic, Partition: partition}
-	if info := s.offsets[key]; offset <= info.Offset {
-		info.Offset = offset
-		info.Metadata = metadata
-		s.offsets[key] = info
-	}
-}
-
-// ResetOffset stashes the provided message offset
-// See ResetPartitionOffset for explanation
-func (s *OffsetStash) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
-	s.ResetPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
-}
-
-// Offsets returns the latest stashed offsets by topic-partition
-func (s *OffsetStash) Offsets() map[string]int64 {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	res := make(map[string]int64, len(s.offsets))
-	for tp, info := range s.offsets {
-		res[tp.String()] = info.Offset
-	}
-	return res
-}
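A short sketch of the OffsetStash pattern above: offsets are accumulated locally and then marked on the consumer in bulk (names and batch size are illustrative):

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-group", []string{"my_topic"}, cluster.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	stash := cluster.NewOffsetStash()
	n := 0
	for msg := range consumer.Messages() {
		stash.MarkOffset(msg, "") // accumulate; nothing is marked on the consumer yet
		if n++; n%500 == 0 {
			consumer.MarkOffsets(stash) // mark the whole batch as processed in bulk
		}
	}
}
```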
diff --git a/vendor/github.com/bsm/sarama-cluster/partitions.go b/vendor/github.com/bsm/sarama-cluster/partitions.go
deleted file mode 100644
index bfaa587..0000000
--- a/vendor/github.com/bsm/sarama-cluster/partitions.go
+++ /dev/null
@@ -1,290 +0,0 @@
-package cluster
-
-import (
-	"sort"
-	"sync"
-	"time"
-
-	"github.com/Shopify/sarama"
-)
-
-// PartitionConsumer allows code to consume individual partitions from the cluster.
-//
-// See docs for Consumer.Partitions() for more on how to implement this.
-type PartitionConsumer interface {
-	sarama.PartitionConsumer
-
-	// Topic returns the consumed topic name
-	Topic() string
-
-	// Partition returns the consumed partition
-	Partition() int32
-
-	// InitialOffset returns the offset used for creating the PartitionConsumer instance.
-	// The returned offset can be a literal offset, or OffsetNewest, or OffsetOldest
-	InitialOffset() int64
-  
-	// MarkOffset marks the offset of a message as processed.
-	MarkOffset(offset int64, metadata string)
-
-	// ResetOffset resets the offset to a previously processed message.
-	ResetOffset(offset int64, metadata string)
-}
-
-type partitionConsumer struct {
-	sarama.PartitionConsumer
-
-	state partitionState
-	mu    sync.Mutex
-
-	topic         string
-	partition     int32
-	initialOffset int64
-
-	closeOnce sync.Once
-	closeErr  error
-
-	dying, dead chan none
-}
-
-func newPartitionConsumer(manager sarama.Consumer, topic string, partition int32, info offsetInfo, defaultOffset int64) (*partitionConsumer, error) {
-	offset := info.NextOffset(defaultOffset)
-	pcm, err := manager.ConsumePartition(topic, partition, offset)
-
-	// Resume from default offset, if requested offset is out-of-range
-	if err == sarama.ErrOffsetOutOfRange {
-		info.Offset = -1
-		offset = defaultOffset
-		pcm, err = manager.ConsumePartition(topic, partition, offset)
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	return &partitionConsumer{
-		PartitionConsumer: pcm,
-		state:             partitionState{Info: info},
-
-		topic:         topic,
-		partition:     partition,
-		initialOffset: offset,
-
-		dying: make(chan none),
-		dead:  make(chan none),
-	}, nil
-}
-
-// Topic implements PartitionConsumer
-func (c *partitionConsumer) Topic() string { return c.topic }
-
-// Partition implements PartitionConsumer
-func (c *partitionConsumer) Partition() int32 { return c.partition }
-
-// InitialOffset implements PartitionConsumer
-func (c *partitionConsumer) InitialOffset() int64 { return c.initialOffset }
-
-// AsyncClose implements PartitionConsumer
-func (c *partitionConsumer) AsyncClose() {
-	c.closeOnce.Do(func() {
-		c.closeErr = c.PartitionConsumer.Close()
-		close(c.dying)
-	})
-}
-
-// Close implements PartitionConsumer
-func (c *partitionConsumer) Close() error {
-	c.AsyncClose()
-	<-c.dead
-	return c.closeErr
-}
-
-func (c *partitionConsumer) waitFor(stopper <-chan none, errors chan<- error) {
-	defer close(c.dead)
-
-	for {
-		select {
-		case err, ok := <-c.Errors():
-			if !ok {
-				return
-			}
-			select {
-			case errors <- err:
-			case <-stopper:
-				return
-			case <-c.dying:
-				return
-			}
-		case <-stopper:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-func (c *partitionConsumer) multiplex(stopper <-chan none, messages chan<- *sarama.ConsumerMessage, errors chan<- error) {
-	defer close(c.dead)
-
-	for {
-		select {
-		case msg, ok := <-c.Messages():
-			if !ok {
-				return
-			}
-			select {
-			case messages <- msg:
-			case <-stopper:
-				return
-			case <-c.dying:
-				return
-			}
-		case err, ok := <-c.Errors():
-			if !ok {
-				return
-			}
-			select {
-			case errors <- err:
-			case <-stopper:
-				return
-			case <-c.dying:
-				return
-			}
-		case <-stopper:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-func (c *partitionConsumer) getState() partitionState {
-	c.mu.Lock()
-	state := c.state
-	c.mu.Unlock()
-
-	return state
-}
-
-func (c *partitionConsumer) markCommitted(offset int64) {
-	c.mu.Lock()
-	if offset == c.state.Info.Offset {
-		c.state.Dirty = false
-	}
-	c.mu.Unlock()
-}
-
-// MarkOffset implements PartitionConsumer
-func (c *partitionConsumer) MarkOffset(offset int64, metadata string) {
-	c.mu.Lock()
-	if next := offset + 1; next > c.state.Info.Offset {
-		c.state.Info.Offset = next
-		c.state.Info.Metadata = metadata
-		c.state.Dirty = true
-	}
-	c.mu.Unlock()
-}
-
-// ResetOffset implements PartitionConsumer
-func (c *partitionConsumer) ResetOffset(offset int64, metadata string) {
-	c.mu.Lock()
-	if next := offset + 1; next <= c.state.Info.Offset {
-		c.state.Info.Offset = next
-		c.state.Info.Metadata = metadata
-		c.state.Dirty = true
-	}
-	c.mu.Unlock()
-}
-
-// --------------------------------------------------------------------
-
-type partitionState struct {
-	Info       offsetInfo
-	Dirty      bool
-	LastCommit time.Time
-}
-
-// --------------------------------------------------------------------
-
-type partitionMap struct {
-	data map[topicPartition]*partitionConsumer
-	mu   sync.RWMutex
-}
-
-func newPartitionMap() *partitionMap {
-	return &partitionMap{
-		data: make(map[topicPartition]*partitionConsumer),
-	}
-}
-
-func (m *partitionMap) IsSubscribedTo(topic string) bool {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-
-	for tp := range m.data {
-		if tp.Topic == topic {
-			return true
-		}
-	}
-	return false
-}
-
-func (m *partitionMap) Fetch(topic string, partition int32) *partitionConsumer {
-	m.mu.RLock()
-	pc, _ := m.data[topicPartition{topic, partition}]
-	m.mu.RUnlock()
-	return pc
-}
-
-func (m *partitionMap) Store(topic string, partition int32, pc *partitionConsumer) {
-	m.mu.Lock()
-	m.data[topicPartition{topic, partition}] = pc
-	m.mu.Unlock()
-}
-
-func (m *partitionMap) Snapshot() map[topicPartition]partitionState {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-
-	snap := make(map[topicPartition]partitionState, len(m.data))
-	for tp, pc := range m.data {
-		snap[tp] = pc.getState()
-	}
-	return snap
-}
-
-func (m *partitionMap) Stop() {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-
-	var wg sync.WaitGroup
-	for tp := range m.data {
-		wg.Add(1)
-		go func(p *partitionConsumer) {
-			_ = p.Close()
-			wg.Done()
-		}(m.data[tp])
-	}
-	wg.Wait()
-}
-
-func (m *partitionMap) Clear() {
-	m.mu.Lock()
-	for tp := range m.data {
-		delete(m.data, tp)
-	}
-	m.mu.Unlock()
-}
-
-func (m *partitionMap) Info() map[string][]int32 {
-	info := make(map[string][]int32)
-	m.mu.RLock()
-	for tp := range m.data {
-		info[tp.Topic] = append(info[tp.Topic], tp.Partition)
-	}
-	m.mu.RUnlock()
-
-	for topic := range info {
-		sort.Sort(int32Slice(info[topic]))
-	}
-	return info
-}
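
Worth noting for anyone tracking this removal: MarkOffset and ResetOffset above store offset+1, i.e. the next offset to consume, matching Kafka's committed-offset convention; MarkOffset only moves forward and ResetOffset only rewinds. A hedged sketch of the consume loop the interface was designed for (the Partitions()-mode consumer setup is assumed and not shown):

```go
// consume drains one partition; pc is a cluster.PartitionConsumer obtained
// from Consumer.Partitions().
func consume(pc cluster.PartitionConsumer) {
	defer pc.Close()
	for msg := range pc.Messages() {
		// ... handle msg ...

		// Internally records msg.Offset+1 as the next offset to commit,
		// but only if it is newer than what is already recorded.
		pc.MarkOffset(msg.Offset, "")
	}
}
```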
diff --git a/vendor/github.com/bsm/sarama-cluster/util.go b/vendor/github.com/bsm/sarama-cluster/util.go
deleted file mode 100644
index e7cb5dd..0000000
--- a/vendor/github.com/bsm/sarama-cluster/util.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package cluster
-
-import (
-	"fmt"
-	"sort"
-	"sync"
-)
-
-type none struct{}
-
-type topicPartition struct {
-	Topic     string
-	Partition int32
-}
-
-func (tp *topicPartition) String() string {
-	return fmt.Sprintf("%s-%d", tp.Topic, tp.Partition)
-}
-
-type offsetInfo struct {
-	Offset   int64
-	Metadata string
-}
-
-func (i offsetInfo) NextOffset(fallback int64) int64 {
-	if i.Offset > -1 {
-		return i.Offset
-	}
-	return fallback
-}
-
-type int32Slice []int32
-
-func (p int32Slice) Len() int           { return len(p) }
-func (p int32Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-func (p int32Slice) Diff(o int32Slice) (res []int32) {
-	on := len(o)
-	for _, x := range p {
-		n := sort.Search(on, func(i int) bool { return o[i] >= x })
-		if n < on && o[n] == x {
-			continue
-		}
-		res = append(res, x)
-	}
-	return
-}
-
-// --------------------------------------------------------------------
-
-type loopTomb struct {
-	c chan none
-	o sync.Once
-	w sync.WaitGroup
-}
-
-func newLoopTomb() *loopTomb {
-	return &loopTomb{c: make(chan none)}
-}
-
-func (t *loopTomb) stop()  { t.o.Do(func() { close(t.c) }) }
-func (t *loopTomb) Close() { t.stop(); t.w.Wait() }
-
-func (t *loopTomb) Dying() <-chan none { return t.c }
-func (t *loopTomb) Go(f func(<-chan none)) {
-	t.w.Add(1)
-
-	go func() {
-		defer t.stop()
-		defer t.w.Done()
-
-		f(t.c)
-	}()
-}
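
The removed loopTomb was a small lifecycle helper: Go launches a loop and hands it the dying channel, stop closes that channel exactly once, and Close stops and then waits for every loop to return. The type is unexported, so this is package-internal only; a sketch of the intended pattern, with the work body purely illustrative:

```go
t := newLoopTomb()
t.Go(func(dying <-chan none) {
	for {
		select {
		case <-dying:
			return // Close (or a sibling loop exiting) asked us to stop
		default:
			// ... one iteration of background work ...
		}
	}
})
// Close closes the dying channel and blocks until every loop has returned.
t.Close()
```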
diff --git a/vendor/github.com/bufbuild/protocompile/LICENSE b/vendor/github.com/bufbuild/protocompile/LICENSE
new file mode 100644
index 0000000..553cbbf
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2020-2024 Buf Technologies, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go
new file mode 100644
index 0000000..ee054fa
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go
@@ -0,0 +1,420 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package editions contains helpers related to resolving features for
+// Protobuf editions. These are lower-level helpers. Higher-level helpers
+// (which use this package under the hood) can be found in the exported
+// protoutil package.
+package editions
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"google.golang.org/protobuf/encoding/prototext"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
+	"google.golang.org/protobuf/types/dynamicpb"
+)
+
+const (
+	// MinSupportedEdition is the earliest edition supported by this module.
+	// It should be 2023 (the first edition) for the indefinite future.
+	MinSupportedEdition = descriptorpb.Edition_EDITION_2023
+
+	// MaxSupportedEdition is the most recent edition supported by this module.
+	MaxSupportedEdition = descriptorpb.Edition_EDITION_2023
+)
+
+var (
+	// SupportedEditions is the exhaustive set of editions that protocompile
+	// can support. We don't allow it to compile future/unknown editions, to
+	// make sure we don't generate incorrect descriptors, in the event that
+	// a future edition introduces a change or new feature that requires
+	// new logic in the compiler.
+	SupportedEditions = computeSupportedEditions(MinSupportedEdition, MaxSupportedEdition)
+
+	// FeatureSetDescriptor is the message descriptor for the compiled-in
+	// version (in the descriptorpb package) of the google.protobuf.FeatureSet
+	// message type.
+	FeatureSetDescriptor = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Descriptor()
+	// FeatureSetType is the message type for the compiled-in version (in
+	// the descriptorpb package) of google.protobuf.FeatureSet.
+	FeatureSetType = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Type()
+
+	editionDefaults     map[descriptorpb.Edition]*descriptorpb.FeatureSet
+	editionDefaultsInit sync.Once
+)
+
+// HasFeatures is implemented by all options messages and provides a
+// nil-receiver-safe way of accessing the features explicitly configured
+// in those options.
+type HasFeatures interface {
+	GetFeatures() *descriptorpb.FeatureSet
+}
+
+var _ HasFeatures = (*descriptorpb.FileOptions)(nil)
+var _ HasFeatures = (*descriptorpb.MessageOptions)(nil)
+var _ HasFeatures = (*descriptorpb.FieldOptions)(nil)
+var _ HasFeatures = (*descriptorpb.OneofOptions)(nil)
+var _ HasFeatures = (*descriptorpb.ExtensionRangeOptions)(nil)
+var _ HasFeatures = (*descriptorpb.EnumOptions)(nil)
+var _ HasFeatures = (*descriptorpb.EnumValueOptions)(nil)
+var _ HasFeatures = (*descriptorpb.ServiceOptions)(nil)
+var _ HasFeatures = (*descriptorpb.MethodOptions)(nil)
+
+// ResolveFeature resolves a feature for the given descriptor. This simple
+// helper examines the given element and its ancestors, searching for an
+// override. If there is no overridden value, it returns a zero value.
+func ResolveFeature(
+	element protoreflect.Descriptor,
+	fields ...protoreflect.FieldDescriptor,
+) (protoreflect.Value, error) {
+	for {
+		var features *descriptorpb.FeatureSet
+		if withFeatures, ok := element.Options().(HasFeatures); ok {
+			// It should not really be possible for 'ok' to ever be false...
+			features = withFeatures.GetFeatures()
+		}
+
+		// TODO: adaptFeatureSet is only looking at the first field. But if we needed to
+		//       support an extension field inside a custom feature, we'd really need
+		//       to check all fields. That gets particularly complicated if the traversal
+		//       path of fields includes list and map values. Luckily, features are not
+		//       supposed to be repeated and not supposed to themselves have extensions.
+		//       So this should be fine, at least for now.
+		msgRef, err := adaptFeatureSet(features, fields[0])
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		// Navigate the fields to find the value
+		var val protoreflect.Value
+		for i, field := range fields {
+			if i > 0 {
+				msgRef = val.Message()
+			}
+			if !msgRef.Has(field) {
+				val = protoreflect.Value{}
+				break
+			}
+			val = msgRef.Get(field)
+		}
+		if val.IsValid() {
+			// All fields were set!
+			return val, nil
+		}
+
+		parent := element.Parent()
+		if parent == nil {
+			// We've reached the end of the inheritance chain.
+			return protoreflect.Value{}, nil
+		}
+		element = parent
+	}
+}
+
+// HasEdition should be implemented by values that implement
+// [protoreflect.FileDescriptor], to provide access to the file's
+// edition when its syntax is [protoreflect.Editions].
+type HasEdition interface {
+	// Edition returns the numeric value of a google.protobuf.Edition enum
+	// value that corresponds to the edition of this file. If the file does
+	// not use editions, it should return the enum value that corresponds
+	// to the syntax level, EDITION_PROTO2 or EDITION_PROTO3.
+	Edition() int32
+}
+
+// GetEdition returns the edition for a given element. It returns
+// EDITION_PROTO2 or EDITION_PROTO3 if the element is in a file that
+// uses proto2 or proto3 syntax, respectively. It returns EDITION_UNKNOWN
+// if the syntax of the given element is not recognized or if the edition
+// cannot be ascertained from the element's [protoreflect.FileDescriptor].
+func GetEdition(d protoreflect.Descriptor) descriptorpb.Edition {
+	switch d.ParentFile().Syntax() {
+	case protoreflect.Proto2:
+		return descriptorpb.Edition_EDITION_PROTO2
+	case protoreflect.Proto3:
+		return descriptorpb.Edition_EDITION_PROTO3
+	case protoreflect.Editions:
+		withEdition, ok := d.ParentFile().(HasEdition)
+		if !ok {
+			// The parent file should always be a *result, so we should
+			// never be able to actually get in here. If we somehow did
+			// have another implementation of protoreflect.FileDescriptor,
+			// it doesn't provide a way to get the edition, other than the
+			// potentially expensive step of generating a FileDescriptorProto
+			// and then querying for the edition from that. :/
+			return descriptorpb.Edition_EDITION_UNKNOWN
+		}
+		return descriptorpb.Edition(withEdition.Edition())
+	default:
+		return descriptorpb.Edition_EDITION_UNKNOWN
+	}
+}
+
+// GetEditionDefaults returns the default feature values for the given edition.
+// It returns nil if the given edition is not known.
+//
+// This only populates known features, those that are fields of [*descriptorpb.FeatureSet].
+// It does not populate any extension fields.
+//
+// The returned value must not be mutated as it references shared package state.
+func GetEditionDefaults(edition descriptorpb.Edition) *descriptorpb.FeatureSet {
+	editionDefaultsInit.Do(func() {
+		editionDefaults = make(map[descriptorpb.Edition]*descriptorpb.FeatureSet, len(descriptorpb.Edition_name))
+		// Compute default for all known editions in descriptorpb.
+		for editionInt := range descriptorpb.Edition_name {
+			edition := descriptorpb.Edition(editionInt)
+			defaults := &descriptorpb.FeatureSet{}
+			defaultsRef := defaults.ProtoReflect()
+			fields := defaultsRef.Descriptor().Fields()
+			// Note: we are not computing defaults for extensions. Those are not needed
+			// by anything in the compiler, so we can get away with just computing
+			// defaults for these static, non-extension fields.
+			for i, length := 0, fields.Len(); i < length; i++ {
+				field := fields.Get(i)
+				val, err := GetFeatureDefault(edition, FeatureSetType, field)
+				if err != nil {
+					// should we fail somehow??
+					continue
+				}
+				defaultsRef.Set(field, val)
+			}
+			editionDefaults[edition] = defaults
+		}
+	})
+	return editionDefaults[edition]
+}
+
+// GetFeatureDefault computes the default value for a feature. The given container
+// is the message type that contains the field. This should usually be the descriptor
+// for google.protobuf.FeatureSet, but can be a different message for computing the
+// default value of custom features.
+//
+// Note that this always re-computes the default. For known fields of FeatureSet,
+// it is more efficient to query from the statically computed default messages,
+// like so:
+//
+//	editions.GetEditionDefaults(edition).ProtoReflect().Get(feature)
+func GetFeatureDefault(edition descriptorpb.Edition, container protoreflect.MessageType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	opts, ok := feature.Options().(*descriptorpb.FieldOptions)
+	if !ok {
+		// this is most likely impossible except for contrived use cases...
+		return protoreflect.Value{}, fmt.Errorf("options is %T instead of *descriptorpb.FieldOptions", feature.Options())
+	}
+	maxEdition := descriptorpb.Edition(-1)
+	var maxVal string
+	for _, def := range opts.EditionDefaults {
+		if def.GetEdition() <= edition && def.GetEdition() > maxEdition {
+			maxEdition = def.GetEdition()
+			maxVal = def.GetValue()
+		}
+	}
+	if maxEdition == -1 {
+		// no matching default found
+		return protoreflect.Value{}, fmt.Errorf("no relevant default for edition %s", edition)
+	}
+	// We use a typed nil so that it won't fall back to the global registry. Features
+	// should not use extensions or google.protobuf.Any, so a nil *Types is fine.
+	unmarshaler := prototext.UnmarshalOptions{Resolver: (*protoregistry.Types)(nil)}
+	// The string value is in the text format: either a field value literal or a
+	// message literal. (Repeated and map features aren't supported, so there's no
+	// array or map literal syntax to worry about.)
+	if feature.Kind() == protoreflect.MessageKind || feature.Kind() == protoreflect.GroupKind {
+		fldVal := container.Zero().NewField(feature)
+		err := unmarshaler.Unmarshal([]byte(maxVal), fldVal.Message().Interface())
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return fldVal, nil
+	}
+	// The value is the text format for the field. But prototext doesn't provide a way
+	// to unmarshal a single field value. To work around this, we unmarshal into an
+	// enclosing message, which means we must prefix the value with the field name.
+	if feature.IsExtension() {
+		maxVal = fmt.Sprintf("[%s]: %s", feature.FullName(), maxVal)
+	} else {
+		maxVal = fmt.Sprintf("%s: %s", feature.Name(), maxVal)
+	}
+	empty := container.New()
+	err := unmarshaler.Unmarshal([]byte(maxVal), empty.Interface())
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+	return empty.Get(feature), nil
+}
+
+func adaptFeatureSet(msg *descriptorpb.FeatureSet, field protoreflect.FieldDescriptor) (protoreflect.Message, error) {
+	msgRef := msg.ProtoReflect()
+	var actualField protoreflect.FieldDescriptor
+	switch {
+	case field.IsExtension():
+		// Extensions can be used directly with the feature set, even if
+		// field.ContainingMessage() != FeatureSetDescriptor. But only if
+		// the value is either not a message or is a message with the
+		// right descriptor, i.e. val.Descriptor() == field.Message().
+		if actualField = actualDescriptor(msgRef, field); actualField == nil || actualField == field {
+			if msgRef.Has(field) || len(msgRef.GetUnknown()) == 0 {
+				return msgRef, nil
+			}
+			// The field is not present, but the message has unrecognized values. So
+			// let's try to parse the unrecognized bytes, just in case they contain
+			// this extension.
+			temp := &descriptorpb.FeatureSet{}
+			unmarshaler := proto.UnmarshalOptions{
+				AllowPartial: true,
+				Resolver:     resolverForExtension{field},
+			}
+			if err := unmarshaler.Unmarshal(msgRef.GetUnknown(), temp); err != nil {
+				return nil, fmt.Errorf("failed to parse unrecognized fields of FeatureSet: %w", err)
+			}
+			return temp.ProtoReflect(), nil
+		}
+	case field.ContainingMessage() == FeatureSetDescriptor:
+		// Known field, not dynamically generated. Can directly use with the feature set.
+		return msgRef, nil
+	default:
+		actualField = FeatureSetDescriptor.Fields().ByNumber(field.Number())
+	}
+
+	// If we get here, we have a dynamic field descriptor or an extension
+	// descriptor whose message type does not match the descriptor of the
+	// stored value. We need to copy its value into a dynamic message,
+	// which requires marshalling/unmarshalling.
+	// We only need to copy over the unrecognized bytes (if any)
+	// and the same field (if present).
+	data := msgRef.GetUnknown()
+	if actualField != nil && msgRef.Has(actualField) {
+		subset := &descriptorpb.FeatureSet{}
+		subset.ProtoReflect().Set(actualField, msgRef.Get(actualField))
+		var err error
+		data, err = proto.MarshalOptions{AllowPartial: true}.MarshalAppend(data, subset)
+		if err != nil {
+			return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err)
+		}
+	}
+	if len(data) == 0 {
+		// No relevant data to copy over, so we can just return
+		// a zero value message
+		return dynamicpb.NewMessageType(field.ContainingMessage()).Zero(), nil
+	}
+
+	other := dynamicpb.NewMessage(field.ContainingMessage())
+	// We don't need to use a resolver for this step because we know that
+	// field is not an extension. And features are not allowed to themselves
+	// have extensions.
+	if err := (proto.UnmarshalOptions{AllowPartial: true}).Unmarshal(data, other); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal FeatureSet field %s from bytes: %w", field.Name(), err)
+	}
+	return other, nil
+}
+
+type resolverForExtension struct {
+	ext protoreflect.ExtensionDescriptor
+}
+
+func (r resolverForExtension) FindMessageByName(_ protoreflect.FullName) (protoreflect.MessageType, error) {
+	return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindMessageByURL(_ string) (protoreflect.MessageType, error) {
+	return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	if field == r.ext.FullName() {
+		return asExtensionType(r.ext), nil
+	}
+	return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	if message == r.ext.ContainingMessage().FullName() && field == r.ext.Number() {
+		return asExtensionType(r.ext), nil
+	}
+	return nil, protoregistry.NotFound
+}
+
+func asExtensionType(ext protoreflect.ExtensionDescriptor) protoreflect.ExtensionType {
+	if xtd, ok := ext.(protoreflect.ExtensionTypeDescriptor); ok {
+		return xtd.Type()
+	}
+	return dynamicpb.NewExtensionType(ext)
+}
+
+func computeSupportedEditions(minEdition, maxEdition descriptorpb.Edition) map[string]descriptorpb.Edition {
+	supportedEditions := map[string]descriptorpb.Edition{}
+	for editionNum := range descriptorpb.Edition_name {
+		edition := descriptorpb.Edition(editionNum)
+		if edition >= minEdition && edition <= maxEdition {
+			name := strings.TrimPrefix(edition.String(), "EDITION_")
+			supportedEditions[name] = edition
+		}
+	}
+	return supportedEditions
+}
+
+// actualDescriptor returns the actual field descriptor referenced by msg that
+// corresponds to the given ext (i.e. same number). It returns nil if msg has
+// no reference, if the actual descriptor is the same as ext, or if ext is
+// otherwise safe to use as is.
+func actualDescriptor(msg protoreflect.Message, ext protoreflect.ExtensionDescriptor) protoreflect.FieldDescriptor {
+	if !msg.Has(ext) || ext.Message() == nil {
+		// nothing to match; safe as is
+		return nil
+	}
+	val := msg.Get(ext)
+	switch {
+	case ext.IsMap(): // should not actually be possible
+		expectedDescriptor := ext.MapValue().Message()
+		if expectedDescriptor == nil {
+			return nil // nothing to match
+		}
+		// We know msg.Has(field) is true, from above, so there's at least one entry.
+		var matches bool
+		val.Map().Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool {
+			matches = val.Message().Descriptor() == expectedDescriptor
+			return false
+		})
+		if matches {
+			return nil
+		}
+	case ext.IsList():
+		// We know msg.Has(field) is true, from above, so there's at least one entry.
+		if val.List().Get(0).Message().Descriptor() == ext.Message() {
+			return nil
+		}
+	case !ext.IsMap():
+		if val.Message().Descriptor() == ext.Message() {
+			return nil
+		}
+	}
+	// The underlying message descriptors do not match. So we need to return
+	// the actual field descriptor. Sadly, protoreflect.Message provides no way
+	// to query the field descriptor in a message by number. For non-extensions,
+	// one can query the associated message descriptor. But for extensions, we
+	// have to do the slow thing, and range through all fields looking for it.
+	var actualField protoreflect.FieldDescriptor
+	msg.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+		if fd.Number() == ext.Number() {
+			actualField = fd
+			return false
+		}
+		return true
+	})
+	return actualField
+}
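
A sketch of how the resolution flow added above fits together for a known feature. Here fd stands for any protoreflect.FieldDescriptor produced by the compiler and is not part of this change; note the import is module-internal, so code outside protocompile would go through the exported protoutil package instead:

```go
import (
	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/bufbuild/protocompile/internal/editions"
)

// resolvePresence resolves the field_presence feature for fd, falling back
// to the defaults for the file's edition when nothing overrides it.
func resolvePresence(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	presence := editions.FeatureSetDescriptor.Fields().ByName("field_presence")

	val, err := editions.ResolveFeature(fd, presence)
	if err != nil {
		return protoreflect.Value{}, err
	}
	if !val.IsValid() {
		// No override on fd or any of its ancestors: use the defaults computed
		// for the file's edition (proto2/proto3 map to their pseudo-editions).
		defaults := editions.GetEditionDefaults(editions.GetEdition(fd))
		val = defaults.ProtoReflect().Get(presence)
	}
	return val, nil
}
```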
diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/editions.go b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go
new file mode 100644
index 0000000..fb21dff
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go
@@ -0,0 +1,140 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package protoutil
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+	"google.golang.org/protobuf/types/dynamicpb"
+
+	"github.com/bufbuild/protocompile/internal/editions"
+)
+
+// GetFeatureDefault gets the default value for the given feature and the given
+// edition. The given feature must represent a field of the google.protobuf.FeatureSet
+// message and must not be an extension.
+//
+// If the given field is from a dynamically built descriptor (i.e. its containing
+// message descriptor is different from the linked-in descriptor for
+// [*descriptorpb.FeatureSet]), the returned value may be a dynamic value. In such
+// cases, the value may not be directly usable using [protoreflect.Message.Set] with
+// an instance of [*descriptorpb.FeatureSet] and must instead be used with a
+// [*dynamicpb.Message].
+//
+// To get the default value of a custom feature, use [GetCustomFeatureDefault]
+// instead.
+func GetFeatureDefault(edition descriptorpb.Edition, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	if feature.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() {
+		return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s",
+			feature.Name(), feature.ContainingMessage().FullName(), editions.FeatureSetDescriptor.FullName())
+	}
+	var msgType protoreflect.MessageType
+	if feature.ContainingMessage() == editions.FeatureSetDescriptor {
+		msgType = editions.FeatureSetType
+	} else {
+		msgType = dynamicpb.NewMessageType(feature.ContainingMessage())
+	}
+	return editions.GetFeatureDefault(edition, msgType, feature)
+}
+
+// GetCustomFeatureDefault gets the default value for the given custom feature
+// and given edition. A custom feature is a field whose containing message is the
+// type of an extension field of google.protobuf.FeatureSet. The given extension
+// describes that extension field and message type. The given feature must be a
+// field of that extension's message type.
+func GetCustomFeatureDefault(edition descriptorpb.Edition, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	extDesc := extension.TypeDescriptor()
+	if extDesc.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() {
+		return protoreflect.Value{}, fmt.Errorf("extension %s does not extend %s", extDesc.FullName(), editions.FeatureSetDescriptor.FullName())
+	}
+	if extDesc.Message() == nil {
+		return protoreflect.Value{}, fmt.Errorf("extensions of %s should be messages; %s is instead %s",
+			editions.FeatureSetDescriptor.FullName(), extDesc.FullName(), extDesc.Kind().String())
+	}
+	if feature.IsExtension() {
+		return protoreflect.Value{}, fmt.Errorf("feature %s is an extension, but feature extension %s may not itself have extensions",
+			feature.FullName(), extDesc.FullName())
+	}
+	if feature.ContainingMessage().FullName() != extDesc.Message().FullName() {
+		return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s",
+			feature.Name(), feature.ContainingMessage().FullName(), extDesc.Message().FullName())
+	}
+	if feature.ContainingMessage() != extDesc.Message() {
+		return protoreflect.Value{}, fmt.Errorf("feature %s has a different message descriptor from the given extension type for %s",
+			feature.Name(), extDesc.Message().FullName())
+	}
+	return editions.GetFeatureDefault(edition, extension.Zero().Message().Type(), feature)
+}
+
+// ResolveFeature resolves a feature for the given descriptor.
+//
+// If the given element is in a proto2 or proto3 syntax file, this skips
+// resolution and just returns the relevant default (since such files are not
+// allowed to override features). If neither the given element nor any of its
+// ancestors override the given feature, the relevant default is returned.
+//
+// This has the same caveat as GetFeatureDefault if the given feature is from a
+// dynamically built descriptor.
+func ResolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	edition := editions.GetEdition(element)
+	defaultVal, err := GetFeatureDefault(edition, feature)
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+	return resolveFeature(edition, defaultVal, element, feature)
+}
+
+// ResolveCustomFeature resolves a custom feature for the given extension and
+// field descriptor.
+//
+// The given extension must be an extension of google.protobuf.FeatureSet that
+// represents a non-repeated message value. The given feature is a field in
+// that extension's message type.
+//
+// If the given element is in a proto2 or proto3 syntax file, this skips
+// resolution and just returns the relevant default (since such files are not
+// allowed to override features). If neither the given element nor any of its
+// ancestors override the given feature, the relevant default is returned.
+func ResolveCustomFeature(element protoreflect.Descriptor, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	edition := editions.GetEdition(element)
+	defaultVal, err := GetCustomFeatureDefault(edition, extension, feature)
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+	return resolveFeature(edition, defaultVal, element, extension.TypeDescriptor(), feature)
+}
+
+func resolveFeature(
+	edition descriptorpb.Edition,
+	defaultVal protoreflect.Value,
+	element protoreflect.Descriptor,
+	fields ...protoreflect.FieldDescriptor,
+) (protoreflect.Value, error) {
+	if edition == descriptorpb.Edition_EDITION_PROTO2 || edition == descriptorpb.Edition_EDITION_PROTO3 {
+		// these syntax levels can't specify features, so we can short-circuit the search
+		// through the descriptor hierarchy for feature overrides
+		return defaultVal, nil
+	}
+	val, err := editions.ResolveFeature(element, fields...)
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+	if val.IsValid() {
+		return val, nil
+	}
+	return defaultVal, nil
+}
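
For callers outside the module, the exported wrappers above are the entry point; ResolveFeature already folds in the edition default, so no manual fallback is needed. A minimal hedged sketch, where fd is any compiler-produced protoreflect.FieldDescriptor:

```go
import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"

	"github.com/bufbuild/protocompile/protoutil"
)

// printPresence prints the resolved field_presence feature for fd.
func printPresence(fd protoreflect.FieldDescriptor) error {
	presence := (*descriptorpb.FeatureSet)(nil).ProtoReflect().
		Descriptor().Fields().ByName("field_presence")

	val, err := protoutil.ResolveFeature(fd, presence)
	if err != nil {
		return err
	}
	// val is either the explicit override or the edition default.
	fmt.Printf("%s: field_presence = %v\n", fd.FullName(), val)
	return nil
}
```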
diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/protos.go b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go
new file mode 100644
index 0000000..9c55999
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go
@@ -0,0 +1,262 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package protoutil contains useful functions for interacting with descriptors.
+// For now these include only functions for efficiently converting descriptors
+// produced by the compiler to descriptor protos and functions for resolving
+// "features" (a core concept of Protobuf Editions).
+//
+// Despite the fact that descriptor protos are mutable, calling code should NOT
+// mutate any of the protos returned from this package. For efficiency, some
+// values returned from this package may reference internal state of a compiler
+// result, and mutating the proto could corrupt or invalidate parts of that
+// result.
+package protoutil
+
+import (
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+// DescriptorProtoWrapper is a protoreflect.Descriptor that wraps an
+// underlying descriptor proto. It provides the same interface as
+// Descriptor but with one extra operation, to efficiently query for
+// the underlying descriptor proto.
+//
+// Descriptors that implement this will also implement another method
+// whose specified return type is the concrete type returned by the
+// AsProto method. The name of this method varies by the type of this
+// descriptor:
+//
+//	 Descriptor Type        Other Method Name
+//	---------------------+------------------------------------
+//	 FileDescriptor      |  FileDescriptorProto()
+//	 MessageDescriptor   |  MessageDescriptorProto()
+//	 FieldDescriptor     |  FieldDescriptorProto()
+//	 OneofDescriptor     |  OneofDescriptorProto()
+//	 EnumDescriptor      |  EnumDescriptorProto()
+//	 EnumValueDescriptor |  EnumValueDescriptorProto()
+//	 ServiceDescriptor   |  ServiceDescriptorProto()
+//	 MethodDescriptor    |  MethodDescriptorProto()
+//
+// For example, a DescriptorProtoWrapper that implements FileDescriptor
+// returns a *descriptorpb.FileDescriptorProto value from its AsProto
+// method and also provides a method with the following signature:
+//
+//	FileDescriptorProto() *descriptorpb.FileDescriptorProto
+type DescriptorProtoWrapper interface {
+	protoreflect.Descriptor
+	// AsProto returns the underlying descriptor proto. The concrete
+	// type of the proto message depends on the type of this
+	// descriptor:
+	//    Descriptor Type        Proto Message Type
+	//   ---------------------+------------------------------------
+	//    FileDescriptor      |  *descriptorpb.FileDescriptorProto
+	//    MessageDescriptor   |  *descriptorpb.DescriptorProto
+	//    FieldDescriptor     |  *descriptorpb.FieldDescriptorProto
+	//    OneofDescriptor     |  *descriptorpb.OneofDescriptorProto
+	//    EnumDescriptor      |  *descriptorpb.EnumDescriptorProto
+	//    EnumValueDescriptor |  *descriptorpb.EnumValueDescriptorProto
+	//    ServiceDescriptor   |  *descriptorpb.ServiceDescriptorProto
+	//    MethodDescriptor    |  *descriptorpb.MethodDescriptorProto
+	AsProto() proto.Message
+}
+
+// ProtoFromDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {
+	switch d := d.(type) {
+	case protoreflect.FileDescriptor:
+		return ProtoFromFileDescriptor(d)
+	case protoreflect.MessageDescriptor:
+		return ProtoFromMessageDescriptor(d)
+	case protoreflect.FieldDescriptor:
+		return ProtoFromFieldDescriptor(d)
+	case protoreflect.OneofDescriptor:
+		return ProtoFromOneofDescriptor(d)
+	case protoreflect.EnumDescriptor:
+		return ProtoFromEnumDescriptor(d)
+	case protoreflect.EnumValueDescriptor:
+		return ProtoFromEnumValueDescriptor(d)
+	case protoreflect.ServiceDescriptor:
+		return ProtoFromServiceDescriptor(d)
+	case protoreflect.MethodDescriptor:
+		return ProtoFromMethodDescriptor(d)
+	default:
+		// Unknown descriptor type; fall back to the generic wrapper interface.
+		if res, ok := d.(DescriptorProtoWrapper); ok {
+			return res.AsProto()
+		}
+		return nil
+	}
+}
+
+// ProtoFromFileDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For file descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. File descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
+	if imp, ok := d.(protoreflect.FileImport); ok {
+		d = imp.FileDescriptor
+	}
+	type canProto interface {
+		FileDescriptorProto() *descriptorpb.FileDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.FileDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok {
+			return fd
+		}
+	}
+	return protodesc.ToFileDescriptorProto(d)
+}
+
+// ProtoFromMessageDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For message descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Message descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromMessageDescriptor(d protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
+	type canProto interface {
+		MessageDescriptorProto() *descriptorpb.DescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.MessageDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if md, ok := res.AsProto().(*descriptorpb.DescriptorProto); ok {
+			return md
+		}
+	}
+	return protodesc.ToDescriptorProto(d)
+}
+
+// ProtoFromFieldDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For field descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Field descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromFieldDescriptor(d protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
+	type canProto interface {
+		FieldDescriptorProto() *descriptorpb.FieldDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.FieldDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if fd, ok := res.AsProto().(*descriptorpb.FieldDescriptorProto); ok {
+			return fd
+		}
+	}
+	return protodesc.ToFieldDescriptorProto(d)
+}
+
+// ProtoFromOneofDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For oneof descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Oneof descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromOneofDescriptor(d protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
+	type canProto interface {
+		OneofDescriptorProto() *descriptorpb.OneofDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.OneofDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if ood, ok := res.AsProto().(*descriptorpb.OneofDescriptorProto); ok {
+			return ood
+		}
+	}
+	return protodesc.ToOneofDescriptorProto(d)
+}
+
+// ProtoFromEnumDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For enum descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Enum descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromEnumDescriptor(d protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
+	type canProto interface {
+		EnumDescriptorProto() *descriptorpb.EnumDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.EnumDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if ed, ok := res.AsProto().(*descriptorpb.EnumDescriptorProto); ok {
+			return ed
+		}
+	}
+	return protodesc.ToEnumDescriptorProto(d)
+}
+
+// ProtoFromEnumValueDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For enum value descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Enum value descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromEnumValueDescriptor(d protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
+	type canProto interface {
+		EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.EnumValueDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if ed, ok := res.AsProto().(*descriptorpb.EnumValueDescriptorProto); ok {
+			return ed
+		}
+	}
+	return protodesc.ToEnumValueDescriptorProto(d)
+}
+
+// ProtoFromServiceDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For service descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Service descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromServiceDescriptor(d protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
+	type canProto interface {
+		ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.ServiceDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if sd, ok := res.AsProto().(*descriptorpb.ServiceDescriptorProto); ok {
+			return sd
+		}
+	}
+	return protodesc.ToServiceDescriptorProto(d)
+}
+
+// ProtoFromMethodDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For method descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Method descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
+	type canProto interface {
+		MethodDescriptorProto() *descriptorpb.MethodDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.MethodDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if md, ok := res.AsProto().(*descriptorpb.MethodDescriptorProto); ok {
+			return md
+		}
+	}
+	return protodesc.ToMethodDescriptorProto(d)
+}
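
The ProtoFrom* helpers above take the cheap path when the descriptor is a compiler result and fall back to protodesc otherwise. A small runnable sketch using a linked-in descriptor, which exercises that fallback:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"

	"github.com/bufbuild/protocompile/protoutil"
)

func main() {
	// descriptorpb's own file descriptor is not a compiler result, so this
	// goes through the protodesc.ToFileDescriptorProto fallback.
	fd := descriptorpb.File_google_protobuf_descriptor_proto
	fdp := protoutil.ProtoFromFileDescriptor(fd)
	fmt.Println(fdp.GetName()) // "google/protobuf/descriptor.proto"
}
```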
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b..33c8830 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@
 - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
 - [FreeCache](https://github.com/coocood/freecache)
 - [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45..78bddf1 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@
 // Store the primes in an array as well.
 //
 // The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
 var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
 
 // Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
 type Digest struct {
 	v1    uint64
 	v2    uint64
@@ -33,19 +36,31 @@
 	n     int // how much of mem is used
 }
 
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
+// New creates a new Digest with a zero seed.
 func New() *Digest {
+	return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
 	var d Digest
-	d.Reset()
+	d.ResetWithSeed(seed)
 	return &d
 }
 
 // Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
 func (d *Digest) Reset() {
-	d.v1 = primes[0] + prime2
-	d.v2 = prime2
-	d.v3 = 0
-	d.v4 = -primes[0]
+	d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+	d.v1 = seed + prime1 + prime2
+	d.v2 = seed + prime2
+	d.v3 = seed
+	d.v4 = seed - prime1
 	d.total = 0
 	d.n = 0
 }
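The seeded constructors added in this hunk mirror the existing zero-seed API. A quick usage sketch of the new functions:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.NewWithSeed(42)
	d.WriteString("hello")
	fmt.Println(d.Sum64())

	// ResetWithSeed reuses the same Digest with a fresh seed.
	d.ResetWithSeed(7)
	d.WriteString("hello")
	fmt.Println(d.Sum64())
}
```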
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index 9216e0a..78f95f2 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -6,7 +6,7 @@
 
 package xxhash
 
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
 //
 //go:noescape
 func Sum64(b []byte) uint64
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 26df13b..118e49e 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -3,7 +3,7 @@
 
 package xxhash
 
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
 func Sum64(b []byte) uint64 {
 	// A simpler version would be
 	//   d := New()
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index e86f1b5..05f5e7d 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -5,7 +5,7 @@
 
 package xxhash
 
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
 func Sum64String(s string) uint64 {
 	return Sum64([]byte(s))
 }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 1c1638f..cf9d42a 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -33,7 +33,7 @@
 //
 // See https://github.com/golang/go/issues/42739 for discussion.
 
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
 // It may be faster than Sum64([]byte(s)) by avoiding a copy.
 func Sum64String(s string) uint64 {
 	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
diff --git a/vendor/github.com/cevaris/ordered_map/.travis.yml b/vendor/github.com/cevaris/ordered_map/.travis.yml
deleted file mode 100644
index 193242f..0000000
--- a/vendor/github.com/cevaris/ordered_map/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-language: go
-
-go:
-  - tip
-  - 1.12
-  - 1.11
-  - 1.10
-  - 1.9
-  - 1.8
-  - 1.7
-  - 1.6
-  - 1.5
-  - 1.4
-  - 1.3
-
-install:
-  - make
-  - make test
diff --git a/vendor/github.com/cevaris/ordered_map/README.md b/vendor/github.com/cevaris/ordered_map/README.md
index bc3e366..a77994d 100644
--- a/vendor/github.com/cevaris/ordered_map/README.md
+++ b/vendor/github.com/cevaris/ordered_map/README.md
@@ -9,7 +9,7 @@
 - Full support Key/Value for all data types
 - Exposes an Iterator that iterates in order of insertion
 - Full Get/Set/Delete map interface
-- Supports Golang v1.3 through v1.12
+- Supports Golang v1.3 through v1.19
 
 ## Download and Install 
   
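As a reminder of the interface the README advertises, a small sketch of ordered_map usage — assuming the package's documented NewOrderedMap/Set/Get/Delete/IterFunc API:

```go
package main

import (
	"fmt"

	"github.com/cevaris/ordered_map"
)

func main() {
	om := ordered_map.NewOrderedMap()
	om.Set("a", 1)
	om.Set("b", 2)
	om.Set("c", 3)

	if v, ok := om.Get("b"); ok {
		fmt.Println("b =", v)
	}
	om.Delete("c")

	// Iteration yields entries in insertion order.
	iter := om.IterFunc()
	for kv, ok := iter(); ok; kv, ok = iter() {
		fmt.Println(kv.Key, kv.Value)
	}
}
```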
diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/github.com/coreos/etcd/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE
deleted file mode 100644
index b39ddfa..0000000
--- a/vendor/github.com/coreos/etcd/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
deleted file mode 100644
index c5faf00..0000000
--- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
+++ /dev/null
@@ -1,972 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: auth.proto
-
-package authpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Permission_Type int32
-
-const (
-	READ      Permission_Type = 0
-	WRITE     Permission_Type = 1
-	READWRITE Permission_Type = 2
-)
-
-var Permission_Type_name = map[int32]string{
-	0: "READ",
-	1: "WRITE",
-	2: "READWRITE",
-}
-
-var Permission_Type_value = map[string]int32{
-	"READ":      0,
-	"WRITE":     1,
-	"READWRITE": 2,
-}
-
-func (x Permission_Type) String() string {
-	return proto.EnumName(Permission_Type_name, int32(x))
-}
-
-func (Permission_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_8bbd6f3875b0e874, []int{1, 0}
-}
-
-// User is a single entry in the bucket authUsers
-type User struct {
-	Name                 []byte   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Password             []byte   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	Roles                []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *User) Reset()         { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage()    {}
-func (*User) Descriptor() ([]byte, []int) {
-	return fileDescriptor_8bbd6f3875b0e874, []int{0}
-}
-func (m *User) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_User.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *User) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_User.Merge(m, src)
-}
-func (m *User) XXX_Size() int {
-	return m.Size()
-}
-func (m *User) XXX_DiscardUnknown() {
-	xxx_messageInfo_User.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_User proto.InternalMessageInfo
-
-// Permission is a single entity
-type Permission struct {
-	PermType             Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
-	Key                  []byte          `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
-	RangeEnd             []byte          `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *Permission) Reset()         { *m = Permission{} }
-func (m *Permission) String() string { return proto.CompactTextString(m) }
-func (*Permission) ProtoMessage()    {}
-func (*Permission) Descriptor() ([]byte, []int) {
-	return fileDescriptor_8bbd6f3875b0e874, []int{1}
-}
-func (m *Permission) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Permission) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Permission.Merge(m, src)
-}
-func (m *Permission) XXX_Size() int {
-	return m.Size()
-}
-func (m *Permission) XXX_DiscardUnknown() {
-	xxx_messageInfo_Permission.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Permission proto.InternalMessageInfo
-
-// Role is a single entry in the bucket authRoles
-type Role struct {
-	Name                 []byte        `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	KeyPermission        []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *Role) Reset()         { *m = Role{} }
-func (m *Role) String() string { return proto.CompactTextString(m) }
-func (*Role) ProtoMessage()    {}
-func (*Role) Descriptor() ([]byte, []int) {
-	return fileDescriptor_8bbd6f3875b0e874, []int{2}
-}
-func (m *Role) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Role.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Role) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Role.Merge(m, src)
-}
-func (m *Role) XXX_Size() int {
-	return m.Size()
-}
-func (m *Role) XXX_DiscardUnknown() {
-	xxx_messageInfo_Role.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Role proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
-	proto.RegisterType((*User)(nil), "authpb.User")
-	proto.RegisterType((*Permission)(nil), "authpb.Permission")
-	proto.RegisterType((*Role)(nil), "authpb.Role")
-}
-
-func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
-
-var fileDescriptor_8bbd6f3875b0e874 = []byte{
-	// 288 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
-	0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
-	0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
-	0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
-	0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
-	0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
-	0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
-	0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
-	0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
-	0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
-	0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
-	0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
-	0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
-	0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
-	0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
-	0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
-	0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
-	0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
-}
-
-func (m *User) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *User) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Roles) > 0 {
-		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Roles[iNdEx])
-			copy(dAtA[i:], m.Roles[iNdEx])
-			i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Permission) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.PermType != 0 {
-		i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Role) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Role) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.KeyPermission) > 0 {
-		for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintAuth(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
-	offset -= sovAuth(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *User) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	if len(m.Roles) > 0 {
-		for _, s := range m.Roles {
-			l = len(s)
-			n += 1 + l + sovAuth(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Permission) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.PermType != 0 {
-		n += 1 + sovAuth(uint64(m.PermType))
-	}
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Role) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	if len(m.KeyPermission) > 0 {
-		for _, e := range m.KeyPermission {
-			l = e.Size()
-			n += 1 + l + sovAuth(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovAuth(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozAuth(x uint64) (n int) {
-	return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *User) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowAuth
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: User: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
-			if m.Name == nil {
-				m.Name = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
-			if m.Password == nil {
-				m.Password = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipAuth(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Permission) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowAuth
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Permission: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
-			}
-			m.PermType = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.PermType |= Permission_Type(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipAuth(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Role) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowAuth
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Role: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
-			if m.Name == nil {
-				m.Name = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.KeyPermission = append(m.KeyPermission, &Permission{})
-			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipAuth(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipAuth(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowAuth
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthAuth
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthAuth
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowAuth
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipAuth(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthAuth
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowAuth   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
deleted file mode 100644
index 001d334..0000000
--- a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
+++ /dev/null
@@ -1,37 +0,0 @@
-syntax = "proto3";
-package authpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-// User is a single entry in the bucket authUsers
-message User {
-  bytes name = 1;
-  bytes password = 2;
-  repeated string roles = 3;
-}
-
-// Permission is a single entity
-message Permission {
-  enum Type {
-    READ = 0;
-    WRITE = 1;
-    READWRITE = 2;
-  }
-  Type permType = 1;
-
-  bytes key = 2;
-  bytes range_end = 3;
-}
-
-// Role is a single entry in the bucket authRoles
-message Role {
-  bytes name = 1;
-
-  repeated Permission keyPermission = 2;
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
deleted file mode 100644
index 9306385..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package balancer implements client balancer.
-package balancer
-
-import (
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/coreos/etcd/clientv3/balancer/connectivity"
-	"github.com/coreos/etcd/clientv3/balancer/picker"
-
-	"go.uber.org/zap"
-	"google.golang.org/grpc/balancer"
-	grpcconnectivity "google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/resolver"
-	_ "google.golang.org/grpc/resolver/dns"         // register DNS resolver
-	_ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
-)
-
-// Config defines balancer configurations.
-type Config struct {
-	// Policy configures balancer policy.
-	Policy picker.Policy
-
-	// Picker implements gRPC picker.
-	// Leave empty if "Policy" field is not custom.
-	// TODO: currently custom policy is not supported.
-	// Picker picker.Picker
-
-	// Name defines an additional name for balancer.
-	// Useful for balancer testing to avoid register conflicts.
-	// If empty, defaults to policy name.
-	Name string
-
-	// Logger configures balancer logging.
-	// If nil, logs are discarded.
-	Logger *zap.Logger
-}
-
-// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
-// must be invoked at initialization time.
-func RegisterBuilder(cfg Config) {
-	bb := &builder{cfg}
-	balancer.Register(bb)
-
-	bb.cfg.Logger.Debug(
-		"registered balancer",
-		zap.String("policy", bb.cfg.Policy.String()),
-		zap.String("name", bb.cfg.Name),
-	)
-}
-
-type builder struct {
-	cfg Config
-}
-
-// Build is called initially when creating "ccBalancerWrapper".
-// "grpc.Dial" is called to this client connection.
-// Then, resolved addresses will be handled via "HandleResolvedAddrs".
-func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	bb := &baseBalancer{
-		id:     strconv.FormatInt(time.Now().UnixNano(), 36),
-		policy: b.cfg.Policy,
-		name:   b.cfg.Name,
-		lg:     b.cfg.Logger,
-
-		addrToSc: make(map[resolver.Address]balancer.SubConn),
-		scToAddr: make(map[balancer.SubConn]resolver.Address),
-		scToSt:   make(map[balancer.SubConn]grpcconnectivity.State),
-
-		currentConn:          nil,
-		connectivityRecorder: connectivity.New(b.cfg.Logger),
-
-		// initialize picker always returns "ErrNoSubConnAvailable"
-		picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
-	}
-
-	// TODO: support multiple connections
-	bb.mu.Lock()
-	bb.currentConn = cc
-	bb.mu.Unlock()
-
-	bb.lg.Info(
-		"built balancer",
-		zap.String("balancer-id", bb.id),
-		zap.String("policy", bb.policy.String()),
-		zap.String("resolver-target", cc.Target()),
-	)
-	return bb
-}
-
-// Name implements "grpc/balancer.Builder" interface.
-func (b *builder) Name() string { return b.cfg.Name }
-
-// Balancer defines client balancer interface.
-type Balancer interface {
-	// Balancer is called on specified client connection. Client initiates gRPC
-	// connection with "grpc.Dial(addr, grpc.WithBalancerName)", and then those resolved
-	// addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
-	// For each resolved address, balancer calls "balancer.ClientConn.NewSubConn".
-	// "grpc/balancer.Balancer.HandleSubConnStateChange" is called when connectivity state
-	// changes, thus requires failover logic in this method.
-	balancer.Balancer
-
-	// Picker calls "Pick" for every client request.
-	picker.Picker
-}
-
-type baseBalancer struct {
-	id     string
-	policy picker.Policy
-	name   string
-	lg     *zap.Logger
-
-	mu sync.RWMutex
-
-	addrToSc map[resolver.Address]balancer.SubConn
-	scToAddr map[balancer.SubConn]resolver.Address
-	scToSt   map[balancer.SubConn]grpcconnectivity.State
-
-	currentConn          balancer.ClientConn
-	connectivityRecorder connectivity.Recorder
-
-	picker picker.Picker
-}
-
-// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
-// gRPC sends initial or updated resolved addresses from "Build".
-func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
-		return
-	}
-	bb.lg.Info("resolved",
-		zap.String("picker", bb.picker.String()),
-		zap.String("balancer-id", bb.id),
-		zap.Strings("addresses", addrsToStrings(addrs)),
-	)
-
-	bb.mu.Lock()
-	defer bb.mu.Unlock()
-
-	resolved := make(map[resolver.Address]struct{})
-	for _, addr := range addrs {
-		resolved[addr] = struct{}{}
-		if _, ok := bb.addrToSc[addr]; !ok {
-			sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
-			if err != nil {
-				bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
-				continue
-			}
-			bb.lg.Info("created subconn", zap.String("address", addr.Addr))
-			bb.addrToSc[addr] = sc
-			bb.scToAddr[sc] = addr
-			bb.scToSt[sc] = grpcconnectivity.Idle
-			sc.Connect()
-		}
-	}
-
-	for addr, sc := range bb.addrToSc {
-		if _, ok := resolved[addr]; !ok {
-			// was removed by resolver or failed to create subconn
-			bb.currentConn.RemoveSubConn(sc)
-			delete(bb.addrToSc, addr)
-
-			bb.lg.Info(
-				"removed subconn",
-				zap.String("picker", bb.picker.String()),
-				zap.String("balancer-id", bb.id),
-				zap.String("address", addr.Addr),
-				zap.String("subconn", scToString(sc)),
-			)
-
-			// Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
-			// (DO NOT) delete(bb.scToAddr, sc)
-			// (DO NOT) delete(bb.scToSt, sc)
-		}
-	}
-}
-
-// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
-func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) {
-	bb.mu.Lock()
-	defer bb.mu.Unlock()
-
-	old, ok := bb.scToSt[sc]
-	if !ok {
-		bb.lg.Warn(
-			"state change for an unknown subconn",
-			zap.String("picker", bb.picker.String()),
-			zap.String("balancer-id", bb.id),
-			zap.String("subconn", scToString(sc)),
-			zap.Int("subconn-size", len(bb.scToAddr)),
-			zap.String("state", s.String()),
-		)
-		return
-	}
-
-	bb.lg.Info(
-		"state changed",
-		zap.String("picker", bb.picker.String()),
-		zap.String("balancer-id", bb.id),
-		zap.Bool("connected", s == grpcconnectivity.Ready),
-		zap.String("subconn", scToString(sc)),
-		zap.Int("subconn-size", len(bb.scToAddr)),
-		zap.String("address", bb.scToAddr[sc].Addr),
-		zap.String("old-state", old.String()),
-		zap.String("new-state", s.String()),
-	)
-
-	bb.scToSt[sc] = s
-	switch s {
-	case grpcconnectivity.Idle:
-		sc.Connect()
-	case grpcconnectivity.Shutdown:
-		// When an address was removed by resolver, b called RemoveSubConn but
-		// kept the sc's state in scToSt. Remove state for this sc here.
-		delete(bb.scToAddr, sc)
-		delete(bb.scToSt, sc)
-	}
-
-	oldAggrState := bb.connectivityRecorder.GetCurrentState()
-	bb.connectivityRecorder.RecordTransition(old, s)
-
-	// Update balancer picker when one of the following happens:
-	//  - this sc became ready from not-ready
-	//  - this sc became not-ready from ready
-	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
-	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
-	if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
-		(bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
-		bb.updatePicker()
-	}
-
-	bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
-}
-
-func (bb *baseBalancer) updatePicker() {
-	if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
-		bb.picker = picker.NewErr(balancer.ErrTransientFailure)
-		bb.lg.Info(
-			"updated picker to transient error picker",
-			zap.String("picker", bb.picker.String()),
-			zap.String("balancer-id", bb.id),
-			zap.String("policy", bb.policy.String()),
-		)
-		return
-	}
-
-	// only pass ready subconns to picker
-	scToAddr := make(map[balancer.SubConn]resolver.Address)
-	for addr, sc := range bb.addrToSc {
-		if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
-			scToAddr[sc] = addr
-		}
-	}
-
-	bb.picker = picker.New(picker.Config{
-		Policy:                   bb.policy,
-		Logger:                   bb.lg,
-		SubConnToResolverAddress: scToAddr,
-	})
-	bb.lg.Info(
-		"updated picker",
-		zap.String("picker", bb.picker.String()),
-		zap.String("balancer-id", bb.id),
-		zap.String("policy", bb.policy.String()),
-		zap.Strings("subconn-ready", scsToStrings(scToAddr)),
-		zap.Int("subconn-size", len(scToAddr)),
-	)
-}
-
-// Close implements "grpc/balancer.Balancer" interface.
-// Close is a nop because base balancer doesn't have internal state to clean up,
-// and it doesn't need to call RemoveSubConn for the SubConns.
-func (bb *baseBalancer) Close() {
-	// TODO
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
deleted file mode 100644
index 4c4ad36..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package connectivity implements client connectivity operations.
-package connectivity
-
-import (
-	"sync"
-
-	"go.uber.org/zap"
-	"google.golang.org/grpc/connectivity"
-)
-
-// Recorder records gRPC connectivity.
-type Recorder interface {
-	GetCurrentState() connectivity.State
-	RecordTransition(oldState, newState connectivity.State)
-}
-
-// New returns a new Recorder.
-func New(lg *zap.Logger) Recorder {
-	return &recorder{lg: lg}
-}
-
-// recorder takes the connectivity states of multiple SubConns
-// and returns one aggregated connectivity state.
-// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
-type recorder struct {
-	lg *zap.Logger
-
-	mu sync.RWMutex
-
-	cur connectivity.State
-
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-func (rc *recorder) GetCurrentState() (state connectivity.State) {
-	rc.mu.RLock()
-	defer rc.mu.RUnlock()
-	return rc.cur
-}
-
-// RecordTransition records state change happening in subConn and based on that
-// it evaluates what aggregated state should be.
-//
-//  - If at least one SubConn in Ready, the aggregated state is Ready;
-//  - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-//  - Else the aggregated state is TransientFailure.
-//
-// Idle and Shutdown are not considered.
-//
-// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
-func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
-	rc.mu.Lock()
-	defer rc.mu.Unlock()
-
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			rc.numReady += updateVal
-		case connectivity.Connecting:
-			rc.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			rc.numTransientFailure += updateVal
-		default:
-			rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
-		}
-	}
-
-	switch { // must be exclusive, no overlap
-	case rc.numReady > 0:
-		rc.cur = connectivity.Ready
-	case rc.numConnecting > 0:
-		rc.cur = connectivity.Connecting
-	default:
-		rc.cur = connectivity.TransientFailure
-	}
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
deleted file mode 100644
index 35dabf5..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package picker defines/implements client balancer picker policy.
-package picker
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
deleted file mode 100644
index 9e04378..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
-	"context"
-
-	"google.golang.org/grpc/balancer"
-)
-
-// NewErr returns a picker that always returns err on "Pick".
-func NewErr(err error) Picker {
-	return &errPicker{p: Error, err: err}
-}
-
-type errPicker struct {
-	p   Policy
-	err error
-}
-
-func (ep *errPicker) String() string {
-	return ep.p.String()
-}
-
-func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	return nil, nil, ep.err
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
deleted file mode 100644
index bd1a5d2..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
-	"fmt"
-
-	"go.uber.org/zap"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/resolver"
-)
-
-// Picker defines balancer Picker methods.
-type Picker interface {
-	balancer.Picker
-	String() string
-}
-
-// Config defines picker configuration.
-type Config struct {
-	// Policy specifies etcd clientv3's built in balancer policy.
-	Policy Policy
-
-	// Logger defines picker logging object.
-	Logger *zap.Logger
-
-	// SubConnToResolverAddress maps each gRPC sub-connection to an address.
-	// Basically, it is a list of addresses that the Picker can pick from.
-	SubConnToResolverAddress map[balancer.SubConn]resolver.Address
-}
-
-// Policy defines balancer picker policy.
-type Policy uint8
-
-const (
-	// Error is error picker policy.
-	Error Policy = iota
-
-	// RoundrobinBalanced balances loads over multiple endpoints
-	// and implements failover in roundrobin fashion.
-	RoundrobinBalanced
-
-	// Custom defines custom balancer picker.
-	// TODO: custom picker is not supported yet.
-	Custom
-)
-
-func (p Policy) String() string {
-	switch p {
-	case Error:
-		return "picker-error"
-
-	case RoundrobinBalanced:
-		return "picker-roundrobin-balanced"
-
-	case Custom:
-		panic("'custom' picker policy is not supported yet")
-
-	default:
-		panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
-	}
-}
-
-// New creates a new Picker.
-func New(cfg Config) Picker {
-	switch cfg.Policy {
-	case Error:
-		panic("'error' picker policy is not supported here; use 'picker.NewErr'")
-
-	case RoundrobinBalanced:
-		return newRoundrobinBalanced(cfg)
-
-	case Custom:
-		panic("'custom' picker policy is not supported yet")
-
-	default:
-		panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
-	}
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
deleted file mode 100644
index 1b8b285..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
-	"context"
-	"sync"
-
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/resolver"
-)
-
-// newRoundrobinBalanced returns a new roundrobin balanced picker.
-func newRoundrobinBalanced(cfg Config) Picker {
-	scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
-	for sc := range cfg.SubConnToResolverAddress {
-		scs = append(scs, sc)
-	}
-	return &rrBalanced{
-		p:        RoundrobinBalanced,
-		lg:       cfg.Logger,
-		scs:      scs,
-		scToAddr: cfg.SubConnToResolverAddress,
-	}
-}
-
-type rrBalanced struct {
-	p Policy
-
-	lg *zap.Logger
-
-	mu       sync.RWMutex
-	next     int
-	scs      []balancer.SubConn
-	scToAddr map[balancer.SubConn]resolver.Address
-}
-
-func (rb *rrBalanced) String() string { return rb.p.String() }
-
-// Pick is called for every client request.
-func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	rb.mu.RLock()
-	n := len(rb.scs)
-	rb.mu.RUnlock()
-	if n == 0 {
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-
-	rb.mu.Lock()
-	cur := rb.next
-	sc := rb.scs[cur]
-	picked := rb.scToAddr[sc].Addr
-	rb.next = (rb.next + 1) % len(rb.scs)
-	rb.mu.Unlock()
-
-	rb.lg.Debug(
-		"picked",
-		zap.String("picker", rb.p.String()),
-		zap.String("address", picked),
-		zap.Int("subconn-index", cur),
-		zap.Int("subconn-size", n),
-	)
-
-	doneFunc := func(info balancer.DoneInfo) {
-		// TODO: error handling?
-		fss := []zapcore.Field{
-			zap.Error(info.Err),
-			zap.String("picker", rb.p.String()),
-			zap.String("address", picked),
-			zap.Bool("success", info.Err == nil),
-			zap.Bool("bytes-sent", info.BytesSent),
-			zap.Bool("bytes-received", info.BytesReceived),
-		}
-		if info.Err == nil {
-			rb.lg.Debug("balancer done", fss...)
-		} else {
-			rb.lg.Warn("balancer failed", fss...)
-		}
-	}
-	return sc, doneFunc, nil
-}
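
The removed rrBalanced picker advances an index under a mutex, next = (next + 1) % len(scs), so successive requests rotate across subconns. A self-contained sketch of the same round-robin selection over plain addresses (it deliberately omits the gRPC balancer plumbing):

package main

import (
	"fmt"
	"sync"
)

// rr hands out addresses in round-robin order, like the deleted picker's
// next = (next + 1) % len(scs) step.
type rr struct {
	mu    sync.Mutex
	next  int
	addrs []string
}

func (r *rr) pick() (string, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if len(r.addrs) == 0 {
		return "", fmt.Errorf("no address available")
	}
	a := r.addrs[r.next]
	r.next = (r.next + 1) % len(r.addrs)
	return a, nil
}

func main() {
	p := &rr{addrs: []string{"10.0.0.1:2379", "10.0.0.2:2379", "10.0.0.3:2379"}}
	for i := 0; i < 4; i++ {
		addr, _ := p.pick()
		fmt.Println(addr) // cycles 1, 2, 3, then wraps back to 1
	}
}
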
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
deleted file mode 100644
index 864b5df..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package endpoint resolves etcd endpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
-package endpoint
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"net/url"
-	"strings"
-	"sync"
-
-	"google.golang.org/grpc/resolver"
-)
-
-const scheme = "endpoint"
-
-var (
-	targetPrefix = fmt.Sprintf("%s://", scheme)
-
-	bldr *builder
-)
-
-func init() {
-	bldr = &builder{
-		resolverGroups: make(map[string]*ResolverGroup),
-	}
-	resolver.Register(bldr)
-}
-
-type builder struct {
-	mu             sync.RWMutex
-	resolverGroups map[string]*ResolverGroup
-}
-
-// NewResolverGroup creates a new ResolverGroup with the given id.
-func NewResolverGroup(id string) (*ResolverGroup, error) {
-	return bldr.newResolverGroup(id)
-}
-
-// ResolverGroup keeps all endpoints of resolvers using a common endpoint://<id>/ target
-// up-to-date.
-type ResolverGroup struct {
-	mu        sync.RWMutex
-	id        string
-	endpoints []string
-	resolvers []*Resolver
-}
-
-func (e *ResolverGroup) addResolver(r *Resolver) {
-	e.mu.Lock()
-	addrs := epsToAddrs(e.endpoints...)
-	e.resolvers = append(e.resolvers, r)
-	e.mu.Unlock()
-	r.cc.NewAddress(addrs)
-}
-
-func (e *ResolverGroup) removeResolver(r *Resolver) {
-	e.mu.Lock()
-	for i, er := range e.resolvers {
-		if er == r {
-			e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
-			break
-		}
-	}
-	e.mu.Unlock()
-}
-
-// SetEndpoints updates the endpoints for ResolverGroup. All registered resolvers are updated
-// immediately with the new endpoints.
-func (e *ResolverGroup) SetEndpoints(endpoints []string) {
-	addrs := epsToAddrs(endpoints...)
-	e.mu.Lock()
-	e.endpoints = endpoints
-	for _, r := range e.resolvers {
-		r.cc.NewAddress(addrs)
-	}
-	e.mu.Unlock()
-}
-
-// Target constructs an endpoint target using the endpoint id of the ResolverGroup.
-func (e *ResolverGroup) Target(endpoint string) string {
-	return Target(e.id, endpoint)
-}
-
-// Target constructs an endpoint resolver target.
-func Target(id, endpoint string) string {
-	return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
-}
-
-// IsTarget checks whether a given target string is an endpoint resolver target.
-func IsTarget(target string) bool {
-	return strings.HasPrefix(target, "endpoint://")
-}
-
-func (e *ResolverGroup) Close() {
-	bldr.close(e.id)
-}
-
-// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
-func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
-	if len(target.Authority) < 1 {
-		return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
-	}
-	id := target.Authority
-	es, err := b.getResolverGroup(id)
-	if err != nil {
-		return nil, fmt.Errorf("failed to build resolver: %v", err)
-	}
-	r := &Resolver{
-		endpointID: id,
-		cc:         cc,
-	}
-	es.addResolver(r)
-	return r, nil
-}
-
-func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
-	b.mu.RLock()
-	_, ok := b.resolverGroups[id]
-	b.mu.RUnlock()
-	if ok {
-		return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
-	}
-
-	es := &ResolverGroup{id: id}
-	b.mu.Lock()
-	b.resolverGroups[id] = es
-	b.mu.Unlock()
-	return es, nil
-}
-
-func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
-	b.mu.RLock()
-	es, ok := b.resolverGroups[id]
-	b.mu.RUnlock()
-	if !ok {
-		return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
-	}
-	return es, nil
-}
-
-func (b *builder) close(id string) {
-	b.mu.Lock()
-	delete(b.resolverGroups, id)
-	b.mu.Unlock()
-}
-
-func (b *builder) Scheme() string {
-	return scheme
-}
-
-// Resolver provides a resolver for a single etcd cluster, identified by name.
-type Resolver struct {
-	endpointID string
-	cc         resolver.ClientConn
-	sync.RWMutex
-}
-
-// TODO: use balancer.epsToAddrs
-func epsToAddrs(eps ...string) (addrs []resolver.Address) {
-	addrs = make([]resolver.Address, 0, len(eps))
-	for _, ep := range eps {
-		addrs = append(addrs, resolver.Address{Addr: ep})
-	}
-	return addrs
-}
-
-func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
-
-func (r *Resolver) Close() {
-	es, err := bldr.getResolverGroup(r.endpointID)
-	if err != nil {
-		return
-	}
-	es.removeResolver(r)
-}
-
-// ParseEndpoint parses an endpoint of the form
-// ((http|https)://<host>*|(unix|unixs)://<path>)
-// and returns a protocol ('tcp' or 'unix'),
-// host (or filepath if a unix socket),
-// scheme (http, https, unix, unixs).
-func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
-	proto = "tcp"
-	host = endpoint
-	url, uerr := url.Parse(endpoint)
-	if uerr != nil || !strings.Contains(endpoint, "://") {
-		return proto, host, scheme
-	}
-	scheme = url.Scheme
-
-	// strip scheme:// prefix since grpc dials by host
-	host = url.Host
-	switch url.Scheme {
-	case "http", "https":
-	case "unix", "unixs":
-		proto = "unix"
-		host = url.Host + url.Path
-	default:
-		proto, host = "", ""
-	}
-	return proto, host, scheme
-}
-
-// ParseTarget parses an endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
-// If the target is malformed, an error is returned.
-func ParseTarget(target string) (string, string, error) {
-	noPrefix := strings.TrimPrefix(target, targetPrefix)
-	if noPrefix == target {
-		return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
-	}
-	parts := strings.SplitN(noPrefix, "/", 2)
-	if len(parts) != 2 {
-		return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
-	}
-	return parts[0], parts[1], nil
-}
-
-// Dialer dials an endpoint using net.Dialer.
-// Context cancelation and timeout are supported.
-func Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
-	proto, host, _ := ParseEndpoint(dialEp)
-	select {
-	case <-ctx.Done():
-		return nil, ctx.Err()
-	default:
-	}
-	dialer := &net.Dialer{}
-	if deadline, ok := ctx.Deadline(); ok {
-		dialer.Deadline = deadline
-	}
-	return dialer.DialContext(ctx, proto, host)
-}
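
The removed resolver handles targets of the form endpoint://<id>/<endpoint> and strips the scheme before dialing, as the ParseTarget and ParseEndpoint comments describe. A standalone sketch of that target handling; buildTarget, splitTarget and hostOf are hypothetical helpers, not the removed API:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// buildTarget mirrors Target(): endpoint://<id>/<endpoint>.
func buildTarget(id, ep string) string {
	return fmt.Sprintf("endpoint://%s/%s", id, ep)
}

// splitTarget mirrors ParseTarget(): it recovers the id and endpoint.
func splitTarget(target string) (id, ep string, err error) {
	rest := strings.TrimPrefix(target, "endpoint://")
	if rest == target {
		return "", "", fmt.Errorf("missing endpoint:// prefix: %s", target)
	}
	parts := strings.SplitN(rest, "/", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected endpoint://<id>/<endpoint>, got %s", target)
	}
	return parts[0], parts[1], nil
}

// hostOf mirrors the ParseEndpoint() host extraction for http/https endpoints:
// grpc dials by host, so the scheme:// prefix is dropped.
func hostOf(ep string) string {
	u, err := url.Parse(ep)
	if err != nil || u.Host == "" {
		return ep
	}
	return u.Host
}

func main() {
	t := buildTarget("cluster-1", "https://10.0.0.1:2379")
	id, ep, _ := splitTarget(t)
	fmt.Println(id, ep, hostOf(ep)) // cluster-1 https://10.0.0.1:2379 10.0.0.1:2379
}
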
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go b/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
deleted file mode 100644
index 48eb875..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package balancer
-
-import (
-	"fmt"
-	"net/url"
-	"sort"
-	"sync/atomic"
-	"time"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/resolver"
-)
-
-func scToString(sc balancer.SubConn) string {
-	return fmt.Sprintf("%p", sc)
-}
-
-func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
-	ss = make([]string, 0, len(scs))
-	for sc, a := range scs {
-		ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
-	}
-	sort.Strings(ss)
-	return ss
-}
-
-func addrsToStrings(addrs []resolver.Address) (ss []string) {
-	ss = make([]string, len(addrs))
-	for i := range addrs {
-		ss[i] = addrs[i].Addr
-	}
-	sort.Strings(ss)
-	return ss
-}
-
-func epsToAddrs(eps ...string) (addrs []resolver.Address) {
-	addrs = make([]resolver.Address, 0, len(eps))
-	for _, ep := range eps {
-		u, err := url.Parse(ep)
-		if err != nil {
-			addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
-			continue
-		}
-		addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
-	}
-	return addrs
-}
-
-var genN = new(uint32)
-
-func genName() string {
-	now := time.Now().UnixNano()
-	return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go b/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
deleted file mode 100644
index 2dc2012..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package credentials implements the gRPC credential interface with etcd-specific logic,
-// e.g., client handshake with custom authority parameter
-package credentials
-
-import (
-	"context"
-	"crypto/tls"
-	"net"
-	"sync"
-
-	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
-	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	grpccredentials "google.golang.org/grpc/credentials"
-)
-
-// Config defines gRPC credential configuration.
-type Config struct {
-	TLSConfig *tls.Config
-}
-
-// Bundle defines gRPC credential interface.
-type Bundle interface {
-	grpccredentials.Bundle
-	UpdateAuthToken(token string)
-}
-
-// NewBundle constructs a new gRPC credential bundle.
-func NewBundle(cfg Config) Bundle {
-	return &bundle{
-		tc: newTransportCredential(cfg.TLSConfig),
-		rc: newPerRPCCredential(),
-	}
-}
-
-// bundle implements "grpccredentials.Bundle" interface.
-type bundle struct {
-	tc *transportCredential
-	rc *perRPCCredential
-}
-
-func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
-	return b.tc
-}
-
-func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
-	return b.rc
-}
-
-func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
-	// no-op
-	return nil, nil
-}
-
-// transportCredential implements "grpccredentials.TransportCredentials" interface.
-// transportCredential wraps TransportCredentials to track which
-// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the
-// hostname or IP of the dialed endpoint.
-// This is a workaround for a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when
-// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name
-// in their TLS certs.
-// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer)
-// when dialing.
-type transportCredential struct {
-	gtc grpccredentials.TransportCredentials
-	mu  sync.Mutex
-	// addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the
-	// endpoint provided to the dialer when dialing
-	addrToEndpoint map[string]string
-}
-
-func newTransportCredential(cfg *tls.Config) *transportCredential {
-	return &transportCredential{
-		gtc:            grpccredentials.NewTLS(cfg),
-		addrToEndpoint: map[string]string{},
-	}
-}
-
-func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
-	// Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint
-	tc.mu.Lock()
-	dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()]
-	tc.mu.Unlock()
-	if ok {
-		_, host, _ := endpoint.ParseEndpoint(dialEp)
-		authority = host
-	}
-	return tc.gtc.ClientHandshake(ctx, authority, rawConn)
-}
-
-// isIP returns true if the given string is an IP address.
-func isIP(ep string) bool {
-	return net.ParseIP(ep) != nil
-}
-
-func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
-	return tc.gtc.ServerHandshake(rawConn)
-}
-
-func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
-	return tc.gtc.Info()
-}
-
-func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
-	copy := map[string]string{}
-	tc.mu.Lock()
-	for k, v := range tc.addrToEndpoint {
-		copy[k] = v
-	}
-	tc.mu.Unlock()
-	return &transportCredential{
-		gtc:            tc.gtc.Clone(),
-		addrToEndpoint: copy,
-	}
-}
-
-func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
-	return tc.gtc.OverrideServerName(serverNameOverride)
-}
-
-func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
-	// Keep track of which addresses are dialed for which endpoints
-	conn, err := endpoint.Dialer(ctx, dialEp)
-	if conn != nil {
-		tc.mu.Lock()
-		tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp
-		tc.mu.Unlock()
-	}
-	return conn, err
-}
-
-// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
-type perRPCCredential struct {
-	authToken   string
-	authTokenMu sync.RWMutex
-}
-
-func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
-
-func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
-
-func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
-	rc.authTokenMu.RLock()
-	authToken := rc.authToken
-	rc.authTokenMu.RUnlock()
-	return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
-}
-
-func (b *bundle) UpdateAuthToken(token string) {
-	if b.rc == nil {
-		return
-	}
-	b.rc.UpdateAuthToken(token)
-}
-
-func (rc *perRPCCredential) UpdateAuthToken(token string) {
-	rc.authTokenMu.Lock()
-	rc.authToken = token
-	rc.authTokenMu.Unlock()
-}
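
The transportCredential comment above notes that the workaround only takes effect when a connection is dialed with both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer). A hedged sketch of that dial pattern using stock grpc-go options, with a plain TCP dialer standing in for the removed bundle's Dialer (the endpoint address is a placeholder):

package main

import (
	"context"
	"crypto/tls"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Transport credentials for the TLS handshake; in the removed bundle this
	// is wrapped so the handshake authority follows the dialed endpoint.
	creds := credentials.NewTLS(&tls.Config{MinVersion: tls.VersionTLS12})

	// Custom dialer, analogous to the bundle's Dialer; here it is just a
	// plain TCP dial so the example stays self-contained.
	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
		return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
	}

	conn, err := grpc.Dial(
		"10.0.0.1:2379", // placeholder endpoint
		grpc.WithTransportCredentials(creds),
		grpc.WithContextDialer(dialer),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
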
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
deleted file mode 100644
index f72c6a6..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
-package rpctypes
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
deleted file mode 100644
index bc1ad7b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-import (
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-// server-side error
-var (
-	ErrGRPCEmptyKey      = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
-	ErrGRPCKeyNotFound   = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
-	ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
-	ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
-	ErrGRPCTooManyOps    = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
-	ErrGRPCDuplicateKey  = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
-	ErrGRPCCompacted     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
-	ErrGRPCFutureRev     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
-	ErrGRPCNoSpace       = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
-
-	ErrGRPCLeaseNotFound    = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
-	ErrGRPCLeaseExist       = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
-	ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
-
-	ErrGRPCMemberExist            = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
-	ErrGRPCPeerURLExist           = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
-	ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
-	ErrGRPCMemberBadURLs          = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
-	ErrGRPCMemberNotFound         = status.New(codes.NotFound, "etcdserver: member not found").Err()
-
-	ErrGRPCRequestTooLarge        = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
-	ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
-
-	ErrGRPCRootUserNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
-	ErrGRPCRootRoleNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
-	ErrGRPCUserAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
-	ErrGRPCUserEmpty            = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
-	ErrGRPCUserNotFound         = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
-	ErrGRPCRoleAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
-	ErrGRPCRoleNotFound         = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
-	ErrGRPCAuthFailed           = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
-	ErrGRPCPermissionDenied     = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
-	ErrGRPCRoleNotGranted       = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
-	ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
-	ErrGRPCAuthNotEnabled       = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
-	ErrGRPCInvalidAuthToken     = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
-	ErrGRPCInvalidAuthMgmt      = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
-
-	ErrGRPCNoLeader                   = status.New(codes.Unavailable, "etcdserver: no leader").Err()
-	ErrGRPCNotLeader                  = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
-	ErrGRPCLeaderChanged              = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
-	ErrGRPCNotCapable                 = status.New(codes.Unavailable, "etcdserver: not capable").Err()
-	ErrGRPCStopped                    = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
-	ErrGRPCTimeout                    = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
-	ErrGRPCTimeoutDueToLeaderFail     = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
-	ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
-	ErrGRPCUnhealthy                  = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
-	ErrGRPCCorrupt                    = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
-
-	errStringToError = map[string]error{
-		ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
-		ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
-		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
-		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
-
-		ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
-		ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
-		ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
-		ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
-		ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
-
-		ErrorDesc(ErrGRPCLeaseNotFound):    ErrGRPCLeaseNotFound,
-		ErrorDesc(ErrGRPCLeaseExist):       ErrGRPCLeaseExist,
-		ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
-
-		ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
-		ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
-		ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
-		ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
-		ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
-
-		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
-		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
-
-		ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
-		ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
-		ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
-		ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
-		ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
-		ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
-		ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
-		ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
-		ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
-		ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
-		ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
-		ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
-		ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
-		ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
-
-		ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
-		ErrorDesc(ErrGRPCNotLeader):                  ErrGRPCNotLeader,
-		ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
-		ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
-		ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
-		ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
-		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
-		ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
-		ErrorDesc(ErrGRPCCorrupt):                    ErrGRPCCorrupt,
-	}
-)
-
-// client-side error
-var (
-	ErrEmptyKey      = Error(ErrGRPCEmptyKey)
-	ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
-	ErrValueProvided = Error(ErrGRPCValueProvided)
-	ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
-	ErrTooManyOps    = Error(ErrGRPCTooManyOps)
-	ErrDuplicateKey  = Error(ErrGRPCDuplicateKey)
-	ErrCompacted     = Error(ErrGRPCCompacted)
-	ErrFutureRev     = Error(ErrGRPCFutureRev)
-	ErrNoSpace       = Error(ErrGRPCNoSpace)
-
-	ErrLeaseNotFound    = Error(ErrGRPCLeaseNotFound)
-	ErrLeaseExist       = Error(ErrGRPCLeaseExist)
-	ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
-
-	ErrMemberExist            = Error(ErrGRPCMemberExist)
-	ErrPeerURLExist           = Error(ErrGRPCPeerURLExist)
-	ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
-	ErrMemberBadURLs          = Error(ErrGRPCMemberBadURLs)
-	ErrMemberNotFound         = Error(ErrGRPCMemberNotFound)
-
-	ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
-	ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
-
-	ErrRootUserNotExist     = Error(ErrGRPCRootUserNotExist)
-	ErrRootRoleNotExist     = Error(ErrGRPCRootRoleNotExist)
-	ErrUserAlreadyExist     = Error(ErrGRPCUserAlreadyExist)
-	ErrUserEmpty            = Error(ErrGRPCUserEmpty)
-	ErrUserNotFound         = Error(ErrGRPCUserNotFound)
-	ErrRoleAlreadyExist     = Error(ErrGRPCRoleAlreadyExist)
-	ErrRoleNotFound         = Error(ErrGRPCRoleNotFound)
-	ErrAuthFailed           = Error(ErrGRPCAuthFailed)
-	ErrPermissionDenied     = Error(ErrGRPCPermissionDenied)
-	ErrRoleNotGranted       = Error(ErrGRPCRoleNotGranted)
-	ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
-	ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
-	ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
-	ErrInvalidAuthMgmt      = Error(ErrGRPCInvalidAuthMgmt)
-
-	ErrNoLeader                   = Error(ErrGRPCNoLeader)
-	ErrNotLeader                  = Error(ErrGRPCNotLeader)
-	ErrLeaderChanged              = Error(ErrGRPCLeaderChanged)
-	ErrNotCapable                 = Error(ErrGRPCNotCapable)
-	ErrStopped                    = Error(ErrGRPCStopped)
-	ErrTimeout                    = Error(ErrGRPCTimeout)
-	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
-	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
-	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
-	ErrCorrupt                    = Error(ErrGRPCCorrupt)
-)
-
-// EtcdError defines gRPC server errors.
-// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
-type EtcdError struct {
-	code codes.Code
-	desc string
-}
-
-// Code returns grpc/codes.Code.
-// TODO: define clientv3/codes.Code.
-func (e EtcdError) Code() codes.Code {
-	return e.code
-}
-
-func (e EtcdError) Error() string {
-	return e.desc
-}
-
-func Error(err error) error {
-	if err == nil {
-		return nil
-	}
-	verr, ok := errStringToError[ErrorDesc(err)]
-	if !ok { // not gRPC error
-		return err
-	}
-	ev, ok := status.FromError(verr)
-	var desc string
-	if ok {
-		desc = ev.Message()
-	} else {
-		desc = verr.Error()
-	}
-	return EtcdError{code: ev.Code(), desc: desc}
-}
-
-func ErrorDesc(err error) string {
-	if s, ok := status.FromError(err); ok {
-		return s.Message()
-	}
-	return err.Error()
-}
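
The removed rpctypes table maps gRPC status messages back to typed sentinels so clients can compare errors by identity. A minimal sketch of that message-keyed lookup using the real google.golang.org/grpc/status API; the single sentinel and the toClientError helper are illustrative, and the real Error() wraps the result in EtcdError rather than returning the sentinel directly:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Server-side sentinel, as the deleted file defines one per condition.
var errGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()

// byMessage plays the role of errStringToError: status message -> sentinel.
var byMessage = map[string]error{
	status.Convert(errGRPCEmptyKey).Message(): errGRPCEmptyKey,
}

// toClientError mirrors the lookup in Error(): map an incoming gRPC error
// back to the known sentinel so callers can compare it by identity.
func toClientError(err error) error {
	s, ok := status.FromError(err)
	if !ok {
		return err
	}
	if known, ok := byMessage[s.Message()]; ok {
		return known
	}
	return err
}

func main() {
	wire := status.Error(codes.InvalidArgument, "etcdserver: key is not provided")
	mapped := toClientError(wire)
	fmt.Println(errors.Is(mapped, errGRPCEmptyKey)) // true
}
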
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
deleted file mode 100644
index 90b8b83..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-var (
-	MetadataRequireLeaderKey = "hasleader"
-	MetadataHasLeader        = "true"
-
-	MetadataClientAPIVersionKey = "client-api-version"
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
deleted file mode 100644
index 8f8ac60..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-var (
-	TokenFieldNameGRPC    = "token"
-	TokenFieldNameSwagger = "authorization"
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
deleted file mode 100644
index 12b6763..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
+++ /dev/null
@@ -1,1034 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: etcdserver.proto
-
-package etcdserverpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Request struct {
-	ID                   uint64   `protobuf:"varint,1,opt,name=ID" json:"ID"`
-	Method               string   `protobuf:"bytes,2,opt,name=Method" json:"Method"`
-	Path                 string   `protobuf:"bytes,3,opt,name=Path" json:"Path"`
-	Val                  string   `protobuf:"bytes,4,opt,name=Val" json:"Val"`
-	Dir                  bool     `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
-	PrevValue            string   `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
-	PrevIndex            uint64   `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
-	PrevExist            *bool    `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
-	Expiration           int64    `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
-	Wait                 bool     `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
-	Since                uint64   `protobuf:"varint,11,opt,name=Since" json:"Since"`
-	Recursive            bool     `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
-	Sorted               bool     `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
-	Quorum               bool     `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
-	Time                 int64    `protobuf:"varint,15,opt,name=Time" json:"Time"`
-	Stream               bool     `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
-	Refresh              *bool    `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Request) Reset()         { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage()    {}
-func (*Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_09ffbeb3bebbce7e, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Request.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Request.Merge(m, src)
-}
-func (m *Request) XXX_Size() int {
-	return m.Size()
-}
-func (m *Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-type Metadata struct {
-	NodeID               uint64   `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
-	ClusterID            uint64   `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Metadata) Reset()         { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage()    {}
-func (*Metadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_09ffbeb3bebbce7e, []int{1}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Metadata) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Metadata.Merge(m, src)
-}
-func (m *Metadata) XXX_Size() int {
-	return m.Size()
-}
-func (m *Metadata) XXX_DiscardUnknown() {
-	xxx_messageInfo_Metadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
-	proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
-}
-
-func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
-
-var fileDescriptor_09ffbeb3bebbce7e = []byte{
-	// 380 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
-	0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
-	0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
-	0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
-	0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
-	0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
-	0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
-	0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
-	0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
-	0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
-	0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
-	0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
-	0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
-	0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
-	0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
-	0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
-	0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
-	0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
-	0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
-	0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
-	0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
-	0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
-	0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
-	0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
-}
-
-func (m *Request) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Request) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Refresh != nil {
-		i--
-		if *m.Refresh {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x1
-		i--
-		dAtA[i] = 0x88
-	}
-	i--
-	if m.Stream {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x1
-	i--
-	dAtA[i] = 0x80
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
-	i--
-	dAtA[i] = 0x78
-	i--
-	if m.Quorum {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x70
-	i--
-	if m.Sorted {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x68
-	i--
-	if m.Recursive {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x60
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
-	i--
-	dAtA[i] = 0x58
-	i--
-	if m.Wait {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x50
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
-	i--
-	dAtA[i] = 0x48
-	if m.PrevExist != nil {
-		i--
-		if *m.PrevExist {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x40
-	}
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
-	i--
-	dAtA[i] = 0x38
-	i -= len(m.PrevValue)
-	copy(dAtA[i:], m.PrevValue)
-	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
-	i--
-	dAtA[i] = 0x32
-	i--
-	if m.Dir {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x28
-	i -= len(m.Val)
-	copy(dAtA[i:], m.Val)
-	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
-	i--
-	dAtA[i] = 0x22
-	i -= len(m.Path)
-	copy(dAtA[i:], m.Path)
-	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
-	i--
-	dAtA[i] = 0x1a
-	i -= len(m.Method)
-	copy(dAtA[i:], m.Method)
-	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
-	i--
-	dAtA[i] = 0x12
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func (m *Metadata) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
-	offset -= sovEtcdserver(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *Request) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovEtcdserver(uint64(m.ID))
-	l = len(m.Method)
-	n += 1 + l + sovEtcdserver(uint64(l))
-	l = len(m.Path)
-	n += 1 + l + sovEtcdserver(uint64(l))
-	l = len(m.Val)
-	n += 1 + l + sovEtcdserver(uint64(l))
-	n += 2
-	l = len(m.PrevValue)
-	n += 1 + l + sovEtcdserver(uint64(l))
-	n += 1 + sovEtcdserver(uint64(m.PrevIndex))
-	if m.PrevExist != nil {
-		n += 2
-	}
-	n += 1 + sovEtcdserver(uint64(m.Expiration))
-	n += 2
-	n += 1 + sovEtcdserver(uint64(m.Since))
-	n += 2
-	n += 2
-	n += 2
-	n += 1 + sovEtcdserver(uint64(m.Time))
-	n += 3
-	if m.Refresh != nil {
-		n += 3
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Metadata) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovEtcdserver(uint64(m.NodeID))
-	n += 1 + sovEtcdserver(uint64(m.ClusterID))
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovEtcdserver(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozEtcdserver(x uint64) (n int) {
-	return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Request) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowEtcdserver
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Request: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Method = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Path = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Val = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Dir = bool(v != 0)
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PrevValue = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 7:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
-			}
-			m.PrevIndex = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.PrevIndex |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			b := bool(v != 0)
-			m.PrevExist = &b
-		case 9:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
-			}
-			m.Expiration = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Expiration |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 10:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Wait = bool(v != 0)
-		case 11:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
-			}
-			m.Since = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Since |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 12:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Recursive = bool(v != 0)
-		case 13:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Sorted = bool(v != 0)
-		case 14:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Quorum = bool(v != 0)
-		case 15:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
-			}
-			m.Time = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Time |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 16:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Stream = bool(v != 0)
-		case 17:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			b := bool(v != 0)
-			m.Refresh = &b
-		default:
-			iNdEx = preIndex
-			skippy, err := skipEtcdserver(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Metadata) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowEtcdserver
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
-			}
-			m.NodeID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.NodeID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
-			}
-			m.ClusterID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ClusterID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipEtcdserver(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipEtcdserver(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowEtcdserver
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthEtcdserver
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthEtcdserver
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowEtcdserver
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipEtcdserver(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthEtcdserver
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowEtcdserver   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
deleted file mode 100644
index 25e0aca..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
+++ /dev/null
@@ -1,34 +0,0 @@
-syntax = "proto2";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message Request {
-	optional uint64 ID         =  1 [(gogoproto.nullable) = false];
-	optional string Method     =  2 [(gogoproto.nullable) = false];
-	optional string Path       =  3 [(gogoproto.nullable) = false];
-	optional string Val        =  4 [(gogoproto.nullable) = false];
-	optional bool   Dir        =  5 [(gogoproto.nullable) = false];
-	optional string PrevValue  =  6 [(gogoproto.nullable) = false];
-	optional uint64 PrevIndex  =  7 [(gogoproto.nullable) = false];
-	optional bool   PrevExist  =  8 [(gogoproto.nullable) = true];
-	optional int64  Expiration =  9 [(gogoproto.nullable) = false];
-	optional bool   Wait       = 10 [(gogoproto.nullable) = false];
-	optional uint64 Since      = 11 [(gogoproto.nullable) = false];
-	optional bool   Recursive  = 12 [(gogoproto.nullable) = false];
-	optional bool   Sorted     = 13 [(gogoproto.nullable) = false];
-	optional bool   Quorum     = 14 [(gogoproto.nullable) = false];
-	optional int64  Time       = 15 [(gogoproto.nullable) = false];
-	optional bool   Stream     = 16 [(gogoproto.nullable) = false];
-	optional bool   Refresh    = 17 [(gogoproto.nullable) = true];
-}
-
-message Metadata {
-	optional uint64 NodeID    = 1 [(gogoproto.nullable) = false];
-	optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
deleted file mode 100644
index b3a199e..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
+++ /dev/null
@@ -1,2427 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: raft_internal.proto
-
-package etcdserverpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type RequestHeader struct {
-	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// username is a username that is associated with an auth token of gRPC connection
-	Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
-	// auth_revision is a revision number of auth.authStore. It is not related to mvcc
-	AuthRevision         uint64   `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *RequestHeader) Reset()         { *m = RequestHeader{} }
-func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
-func (*RequestHeader) ProtoMessage()    {}
-func (*RequestHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b4c9a9be0cfca103, []int{0}
-}
-func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *RequestHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RequestHeader.Merge(m, src)
-}
-func (m *RequestHeader) XXX_Size() int {
-	return m.Size()
-}
-func (m *RequestHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_RequestHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
-
-// An InternalRaftRequest is the union of all requests which can be
-// sent via raft.
-type InternalRaftRequest struct {
-	Header                   *RequestHeader                   `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"`
-	ID                       uint64                           `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	V2                       *Request                         `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"`
-	Range                    *RangeRequest                    `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
-	Put                      *PutRequest                      `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"`
-	DeleteRange              *DeleteRangeRequest              `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"`
-	Txn                      *TxnRequest                      `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"`
-	Compaction               *CompactionRequest               `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"`
-	LeaseGrant               *LeaseGrantRequest               `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"`
-	LeaseRevoke              *LeaseRevokeRequest              `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"`
-	Alarm                    *AlarmRequest                    `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"`
-	AuthEnable               *AuthEnableRequest               `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"`
-	AuthDisable              *AuthDisableRequest              `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"`
-	Authenticate             *InternalAuthenticateRequest     `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"`
-	AuthUserAdd              *AuthUserAddRequest              `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"`
-	AuthUserDelete           *AuthUserDeleteRequest           `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"`
-	AuthUserGet              *AuthUserGetRequest              `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"`
-	AuthUserChangePassword   *AuthUserChangePasswordRequest   `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"`
-	AuthUserGrantRole        *AuthUserGrantRoleRequest        `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"`
-	AuthUserRevokeRole       *AuthUserRevokeRoleRequest       `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"`
-	AuthUserList             *AuthUserListRequest             `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"`
-	AuthRoleList             *AuthRoleListRequest             `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"`
-	AuthRoleAdd              *AuthRoleAddRequest              `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"`
-	AuthRoleDelete           *AuthRoleDeleteRequest           `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"`
-	AuthRoleGet              *AuthRoleGetRequest              `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"`
-	AuthRoleGrantPermission  *AuthRoleGrantPermissionRequest  `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"`
-	AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"`
-	XXX_NoUnkeyedLiteral     struct{}                         `json:"-"`
-	XXX_unrecognized         []byte                           `json:"-"`
-	XXX_sizecache            int32                            `json:"-"`
-}
-
-func (m *InternalRaftRequest) Reset()         { *m = InternalRaftRequest{} }
-func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalRaftRequest) ProtoMessage()    {}
-func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b4c9a9be0cfca103, []int{1}
-}
-func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InternalRaftRequest.Merge(m, src)
-}
-func (m *InternalRaftRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *InternalRaftRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
-
-type EmptyResponse struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *EmptyResponse) Reset()         { *m = EmptyResponse{} }
-func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
-func (*EmptyResponse) ProtoMessage()    {}
-func (*EmptyResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b4c9a9be0cfca103, []int{2}
-}
-func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *EmptyResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EmptyResponse.Merge(m, src)
-}
-func (m *EmptyResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *EmptyResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
-
-// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
-// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
-// For avoiding misusage the field, we have an internal version of AuthenticateRequest.
-type InternalAuthenticateRequest struct {
-	Name     string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	// simple_token is generated in API layer (etcdserver/v3_server.go)
-	SimpleToken          string   `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InternalAuthenticateRequest) Reset()         { *m = InternalAuthenticateRequest{} }
-func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalAuthenticateRequest) ProtoMessage()    {}
-func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b4c9a9be0cfca103, []int{3}
-}
-func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src)
-}
-func (m *InternalAuthenticateRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
-	proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
-	proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
-	proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
-}
-
-func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
-
-var fileDescriptor_b4c9a9be0cfca103 = []byte{
-	// 840 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdb, 0x4e, 0xdb, 0x48,
-	0x18, 0xc7, 0x71, 0x38, 0x66, 0x12, 0xb2, 0xec, 0x00, 0xbb, 0xb3, 0x41, 0xca, 0x86, 0xa0, 0xdd,
-	0x65, 0x77, 0x5b, 0x5a, 0x85, 0x07, 0x68, 0x53, 0x82, 0x00, 0x09, 0x21, 0x64, 0x51, 0xa9, 0x52,
-	0x2f, 0xdc, 0x21, 0xfe, 0x48, 0x5c, 0x1c, 0xdb, 0x1d, 0x4f, 0x52, 0xfa, 0x26, 0x7d, 0x8c, 0x9e,
-	0x1e, 0x82, 0x8b, 0x1e, 0x68, 0xfb, 0x02, 0x2d, 0xbd, 0xe9, 0x55, 0x6f, 0xda, 0x07, 0xa8, 0xe6,
-	0x60, 0x3b, 0x4e, 0x1c, 0xee, 0xec, 0x6f, 0xfe, 0xdf, 0xef, 0xfb, 0x0f, 0xf3, 0x37, 0x13, 0xb4,
-	0xc8, 0xe8, 0x09, 0xb7, 0x1c, 0x8f, 0x03, 0xf3, 0xa8, 0xbb, 0x11, 0x30, 0x9f, 0xfb, 0xb8, 0x08,
-	0xbc, 0x65, 0x87, 0xc0, 0xfa, 0xc0, 0x82, 0xe3, 0xf2, 0x52, 0xdb, 0x6f, 0xfb, 0x72, 0xe1, 0x86,
-	0x78, 0x52, 0x9a, 0xf2, 0x42, 0xa2, 0xd1, 0x95, 0x3c, 0x0b, 0x5a, 0xea, 0xb1, 0xf6, 0x00, 0xcd,
-	0x9b, 0xf0, 0xa8, 0x07, 0x21, 0xdf, 0x05, 0x6a, 0x03, 0xc3, 0x25, 0x94, 0xdb, 0x6b, 0x12, 0xa3,
-	0x6a, 0xac, 0x4f, 0x99, 0xb9, 0xbd, 0x26, 0x2e, 0xa3, 0xb9, 0x5e, 0x28, 0x46, 0x76, 0x81, 0xe4,
-	0xaa, 0xc6, 0x7a, 0xde, 0x8c, 0xdf, 0xf1, 0x1a, 0x9a, 0xa7, 0x3d, 0xde, 0xb1, 0x18, 0xf4, 0x9d,
-	0xd0, 0xf1, 0x3d, 0x32, 0x29, 0xdb, 0x8a, 0xa2, 0x68, 0xea, 0x5a, 0xed, 0x5b, 0x09, 0x2d, 0xee,
-	0x69, 0xd7, 0x26, 0x3d, 0xe1, 0x7a, 0x1c, 0xde, 0x44, 0x33, 0x1d, 0x39, 0x92, 0xd8, 0x55, 0x63,
-	0xbd, 0x50, 0x5f, 0xd9, 0x18, 0xdc, 0xcb, 0x46, 0xca, 0x95, 0xa9, 0xa5, 0x23, 0xee, 0xfe, 0x42,
-	0xb9, 0x7e, 0x5d, 0xfa, 0x2a, 0xd4, 0x97, 0x33, 0x01, 0x66, 0xae, 0x5f, 0xc7, 0x37, 0xd1, 0x34,
-	0xa3, 0x5e, 0x1b, 0xa4, 0xc1, 0x42, 0xbd, 0x3c, 0xa4, 0x14, 0x4b, 0x91, 0x5c, 0x09, 0xf1, 0x7f,
-	0x68, 0x32, 0xe8, 0x71, 0x32, 0x25, 0xf5, 0x24, 0xad, 0x3f, 0xec, 0x45, 0x9b, 0x30, 0x85, 0x08,
-	0x6f, 0xa1, 0xa2, 0x0d, 0x2e, 0x70, 0xb0, 0xd4, 0x90, 0x69, 0xd9, 0x54, 0x4d, 0x37, 0x35, 0xa5,
-	0x22, 0x35, 0xaa, 0x60, 0x27, 0x35, 0x31, 0x90, 0x9f, 0x79, 0x64, 0x26, 0x6b, 0xe0, 0xd1, 0x99,
-	0x17, 0x0f, 0xe4, 0x67, 0x1e, 0xbe, 0x85, 0x50, 0xcb, 0xef, 0x06, 0xb4, 0xc5, 0xc5, 0x1f, 0x7d,
-	0x56, 0xb6, 0xfc, 0x99, 0x6e, 0xd9, 0x8a, 0xd7, 0xa3, 0xce, 0x81, 0x16, 0x7c, 0x1b, 0x15, 0x5c,
-	0xa0, 0x21, 0x58, 0x6d, 0x46, 0x3d, 0x4e, 0xe6, 0xb2, 0x08, 0xfb, 0x42, 0xb0, 0x23, 0xd6, 0x63,
-	0x82, 0x1b, 0x97, 0xc4, 0x9e, 0x15, 0x81, 0x41, 0xdf, 0x3f, 0x05, 0x92, 0xcf, 0xda, 0xb3, 0x44,
-	0x98, 0x52, 0x10, 0xef, 0xd9, 0x4d, 0x6a, 0xe2, 0x58, 0xa8, 0x4b, 0x59, 0x97, 0xa0, 0xac, 0x63,
-	0x69, 0x88, 0xa5, 0xf8, 0x58, 0xa4, 0x10, 0x37, 0x50, 0x41, 0x26, 0x0e, 0x3c, 0x7a, 0xec, 0x02,
-	0xf9, 0x9a, 0xb9, 0xf7, 0x46, 0x8f, 0x77, 0xb6, 0xa5, 0x20, 0x76, 0x4e, 0xe3, 0x12, 0x6e, 0x22,
-	0x99, 0x4f, 0xcb, 0x76, 0x42, 0xc9, 0xf8, 0x3e, 0x9b, 0x65, 0x5d, 0x30, 0x9a, 0x4a, 0x11, 0x5b,
-	0xa7, 0x49, 0x0d, 0x1f, 0x28, 0x0a, 0x78, 0xdc, 0x69, 0x51, 0x0e, 0xe4, 0x87, 0xa2, 0xfc, 0x9b,
-	0xa6, 0x44, 0xb9, 0x6f, 0x0c, 0x48, 0x23, 0x5c, 0xaa, 0x1f, 0x6f, 0xeb, 0x4f, 0x49, 0x7c, 0x5b,
-	0x16, 0xb5, 0x6d, 0xf2, 0x7a, 0x6e, 0x9c, 0xad, 0xbb, 0x21, 0xb0, 0x86, 0x6d, 0xa7, 0x6c, 0xe9,
-	0x1a, 0x3e, 0x40, 0x0b, 0x09, 0x46, 0xc5, 0x8b, 0xbc, 0x51, 0xa4, 0xb5, 0x6c, 0x92, 0xce, 0xa5,
-	0x86, 0x95, 0x68, 0xaa, 0x9c, 0xb6, 0xd5, 0x06, 0x4e, 0xde, 0x5e, 0x69, 0x6b, 0x07, 0xf8, 0x88,
-	0xad, 0x1d, 0xe0, 0xb8, 0x8d, 0xfe, 0x48, 0x30, 0xad, 0x8e, 0x08, 0xbc, 0x15, 0xd0, 0x30, 0x7c,
-	0xec, 0x33, 0x9b, 0xbc, 0x53, 0xc8, 0xff, 0xb3, 0x91, 0x5b, 0x52, 0x7d, 0xa8, 0xc5, 0x11, 0xfd,
-	0x37, 0x9a, 0xb9, 0x8c, 0xef, 0xa1, 0xa5, 0x01, 0xbf, 0x22, 0xa9, 0x16, 0xf3, 0x5d, 0x20, 0x17,
-	0x6a, 0xc6, 0xdf, 0x63, 0x6c, 0xcb, 0x94, 0xfb, 0xc9, 0x51, 0xff, 0x4a, 0x87, 0x57, 0xf0, 0x7d,
-	0xb4, 0x9c, 0x90, 0x55, 0xe8, 0x15, 0xfa, 0xbd, 0x42, 0xff, 0x93, 0x8d, 0xd6, 0xe9, 0x1f, 0x60,
-	0x63, 0x3a, 0xb2, 0x84, 0x77, 0x51, 0x29, 0x81, 0xbb, 0x4e, 0xc8, 0xc9, 0x07, 0x45, 0x5d, 0xcd,
-	0xa6, 0xee, 0x3b, 0x21, 0x4f, 0xe5, 0x28, 0x2a, 0xc6, 0x24, 0x61, 0x4d, 0x91, 0x3e, 0x8e, 0x25,
-	0x89, 0xd1, 0x23, 0xa4, 0xa8, 0x18, 0x1f, 0xbd, 0x24, 0x89, 0x44, 0x3e, 0xcb, 0x8f, 0x3b, 0x7a,
-	0xd1, 0x33, 0x9c, 0x48, 0x5d, 0x8b, 0x13, 0x29, 0x31, 0x3a, 0x91, 0xcf, 0xf3, 0xe3, 0x12, 0x29,
-	0xba, 0x32, 0x12, 0x99, 0x94, 0xd3, 0xb6, 0x44, 0x22, 0x5f, 0x5c, 0x69, 0x6b, 0x38, 0x91, 0xba,
-	0x86, 0x1f, 0xa2, 0xf2, 0x00, 0x46, 0x06, 0x25, 0x00, 0xd6, 0x75, 0x42, 0x79, 0x8f, 0xbd, 0x54,
-	0xcc, 0x6b, 0x63, 0x98, 0x42, 0x7e, 0x18, 0xab, 0x23, 0xfe, 0xef, 0x34, 0x7b, 0x1d, 0x77, 0xd1,
-	0x4a, 0x32, 0x4b, 0x47, 0x67, 0x60, 0xd8, 0x2b, 0x35, 0xec, 0x7a, 0xf6, 0x30, 0x95, 0x92, 0xd1,
-	0x69, 0x84, 0x8e, 0x11, 0xd4, 0x7e, 0x41, 0xf3, 0xdb, 0xdd, 0x80, 0x3f, 0x31, 0x21, 0x0c, 0x7c,
-	0x2f, 0x84, 0x5a, 0x80, 0x56, 0xae, 0xf8, 0x47, 0x84, 0x31, 0x9a, 0x92, 0xb7, 0xbb, 0x21, 0x6f,
-	0x77, 0xf9, 0x2c, 0x6e, 0xfd, 0xf8, 0xfb, 0xd4, 0xb7, 0x7e, 0xf4, 0x8e, 0x57, 0x51, 0x31, 0x74,
-	0xba, 0x81, 0x0b, 0x16, 0xf7, 0x4f, 0x41, 0x5d, 0xfa, 0x79, 0xb3, 0xa0, 0x6a, 0x47, 0xa2, 0x74,
-	0x67, 0xe9, 0xfc, 0x73, 0x65, 0xe2, 0xfc, 0xb2, 0x62, 0x5c, 0x5c, 0x56, 0x8c, 0x4f, 0x97, 0x15,
-	0xe3, 0xe9, 0x97, 0xca, 0xc4, 0xf1, 0x8c, 0xfc, 0xc9, 0xb1, 0xf9, 0x33, 0x00, 0x00, 0xff, 0xff,
-	0xa0, 0xbb, 0x20, 0x2c, 0xca, 0x08, 0x00, 0x00,
-}
-
-func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.AuthRevision != 0 {
-		i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Username) > 0 {
-		i -= len(m.Username)
-		copy(dAtA[i:], m.Username)
-		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.ID != 0 {
-		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.AuthRoleRevokePermission != nil {
-		{
-			size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0xa2
-	}
-	if m.AuthRoleGrantPermission != nil {
-		{
-			size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0x9a
-	}
-	if m.AuthRoleGet != nil {
-		{
-			size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0x92
-	}
-	if m.AuthRoleDelete != nil {
-		{
-			size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0x8a
-	}
-	if m.AuthRoleAdd != nil {
-		{
-			size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0x82
-	}
-	if m.AuthRoleList != nil {
-		{
-			size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x45
-		i--
-		dAtA[i] = 0x9a
-	}
-	if m.AuthUserList != nil {
-		{
-			size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x45
-		i--
-		dAtA[i] = 0x92
-	}
-	if m.AuthUserRevokeRole != nil {
-		{
-			size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x45
-		i--
-		dAtA[i] = 0x8a
-	}
-	if m.AuthUserGrantRole != nil {
-		{
-			size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x45
-		i--
-		dAtA[i] = 0x82
-	}
-	if m.AuthUserChangePassword != nil {
-		{
-			size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x44
-		i--
-		dAtA[i] = 0xfa
-	}
-	if m.AuthUserGet != nil {
-		{
-			size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x44
-		i--
-		dAtA[i] = 0xf2
-	}
-	if m.AuthUserDelete != nil {
-		{
-			size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x44
-		i--
-		dAtA[i] = 0xea
-	}
-	if m.AuthUserAdd != nil {
-		{
-			size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x44
-		i--
-		dAtA[i] = 0xe2
-	}
-	if m.Authenticate != nil {
-		{
-			size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x3f
-		i--
-		dAtA[i] = 0xa2
-	}
-	if m.AuthDisable != nil {
-		{
-			size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x3f
-		i--
-		dAtA[i] = 0x9a
-	}
-	if m.AuthEnable != nil {
-		{
-			size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x3e
-		i--
-		dAtA[i] = 0xc2
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x6
-		i--
-		dAtA[i] = 0xa2
-	}
-	if m.Alarm != nil {
-		{
-			size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x52
-	}
-	if m.LeaseRevoke != nil {
-		{
-			size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4a
-	}
-	if m.LeaseGrant != nil {
-		{
-			size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x42
-	}
-	if m.Compaction != nil {
-		{
-			size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x3a
-	}
-	if m.Txn != nil {
-		{
-			size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x32
-	}
-	if m.DeleteRange != nil {
-		{
-			size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.Put != nil {
-		{
-			size, err := m.Put.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x22
-	}
-	if m.Range != nil {
-		{
-			size, err := m.Range.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.V2 != nil {
-		{
-			size, err := m.V2.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.ID != 0 {
-		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.SimpleToken) > 0 {
-		i -= len(m.SimpleToken)
-		copy(dAtA[i:], m.SimpleToken)
-		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
-	offset -= sovRaftInternal(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *RequestHeader) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRaftInternal(uint64(m.ID))
-	}
-	l = len(m.Username)
-	if l > 0 {
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRevision != 0 {
-		n += 1 + sovRaftInternal(uint64(m.AuthRevision))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *InternalRaftRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRaftInternal(uint64(m.ID))
-	}
-	if m.V2 != nil {
-		l = m.V2.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Range != nil {
-		l = m.Range.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Put != nil {
-		l = m.Put.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.DeleteRange != nil {
-		l = m.DeleteRange.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Txn != nil {
-		l = m.Txn.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Compaction != nil {
-		l = m.Compaction.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.LeaseGrant != nil {
-		l = m.LeaseGrant.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.LeaseRevoke != nil {
-		l = m.LeaseRevoke.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Alarm != nil {
-		l = m.Alarm.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthEnable != nil {
-		l = m.AuthEnable.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthDisable != nil {
-		l = m.AuthDisable.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Authenticate != nil {
-		l = m.Authenticate.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserAdd != nil {
-		l = m.AuthUserAdd.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserDelete != nil {
-		l = m.AuthUserDelete.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserGet != nil {
-		l = m.AuthUserGet.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserChangePassword != nil {
-		l = m.AuthUserChangePassword.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserGrantRole != nil {
-		l = m.AuthUserGrantRole.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserRevokeRole != nil {
-		l = m.AuthUserRevokeRole.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserList != nil {
-		l = m.AuthUserList.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleList != nil {
-		l = m.AuthRoleList.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleAdd != nil {
-		l = m.AuthRoleAdd.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleDelete != nil {
-		l = m.AuthRoleDelete.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleGet != nil {
-		l = m.AuthRoleGet.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleGrantPermission != nil {
-		l = m.AuthRoleGrantPermission.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleRevokePermission != nil {
-		l = m.AuthRoleRevokePermission.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *EmptyResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *InternalAuthenticateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	l = len(m.SimpleToken)
-	if l > 0 {
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovRaftInternal(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRaftInternal(x uint64) (n int) {
-	return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *RequestHeader) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Username = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
-			}
-			m.AuthRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.AuthRevision |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaftInternal(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.V2 == nil {
-				m.V2 = &Request{}
-			}
-			if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Range == nil {
-				m.Range = &RangeRequest{}
-			}
-			if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Put == nil {
-				m.Put = &PutRequest{}
-			}
-			if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.DeleteRange == nil {
-				m.DeleteRange = &DeleteRangeRequest{}
-			}
-			if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Txn == nil {
-				m.Txn = &TxnRequest{}
-			}
-			if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Compaction == nil {
-				m.Compaction = &CompactionRequest{}
-			}
-			if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.LeaseGrant == nil {
-				m.LeaseGrant = &LeaseGrantRequest{}
-			}
-			if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 9:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.LeaseRevoke == nil {
-				m.LeaseRevoke = &LeaseRevokeRequest{}
-			}
-			if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 10:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Alarm == nil {
-				m.Alarm = &AlarmRequest{}
-			}
-			if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 100:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &RequestHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1000:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthEnable == nil {
-				m.AuthEnable = &AuthEnableRequest{}
-			}
-			if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1011:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthDisable == nil {
-				m.AuthDisable = &AuthDisableRequest{}
-			}
-			if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1012:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Authenticate == nil {
-				m.Authenticate = &InternalAuthenticateRequest{}
-			}
-			if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1100:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserAdd == nil {
-				m.AuthUserAdd = &AuthUserAddRequest{}
-			}
-			if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1101:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserDelete == nil {
-				m.AuthUserDelete = &AuthUserDeleteRequest{}
-			}
-			if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1102:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserGet == nil {
-				m.AuthUserGet = &AuthUserGetRequest{}
-			}
-			if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1103:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserChangePassword == nil {
-				m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
-			}
-			if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1104:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserGrantRole == nil {
-				m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
-			}
-			if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1105:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserRevokeRole == nil {
-				m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
-			}
-			if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1106:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserList == nil {
-				m.AuthUserList = &AuthUserListRequest{}
-			}
-			if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1107:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleList == nil {
-				m.AuthRoleList = &AuthRoleListRequest{}
-			}
-			if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1200:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleAdd == nil {
-				m.AuthRoleAdd = &AuthRoleAddRequest{}
-			}
-			if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1201:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleDelete == nil {
-				m.AuthRoleDelete = &AuthRoleDeleteRequest{}
-			}
-			if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1202:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleGet == nil {
-				m.AuthRoleGet = &AuthRoleGetRequest{}
-			}
-			if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1203:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleGrantPermission == nil {
-				m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
-			}
-			if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1204:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleRevokePermission == nil {
-				m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
-			}
-			if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaftInternal(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaftInternal(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.SimpleToken = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaftInternal(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipRaftInternal(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthRaftInternal
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthRaftInternal
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowRaftInternal
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipRaftInternal(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthRaftInternal
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowRaftInternal   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
deleted file mode 100644
index 25d45d3..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
+++ /dev/null
@@ -1,74 +0,0 @@
-syntax = "proto3";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-import "etcdserver.proto";
-import "rpc.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message RequestHeader {
-  uint64 ID = 1;
-  // username is a username that is associated with an auth token of gRPC connection
-  string username = 2;
-  // auth_revision is a revision number of auth.authStore. It is not related to mvcc
-  uint64 auth_revision = 3;
-}
-
-// An InternalRaftRequest is the union of all requests which can be
-// sent via raft.
-message InternalRaftRequest {
-  RequestHeader header = 100;
-  uint64 ID = 1;
-
-  Request v2 = 2;
-
-  RangeRequest range = 3;
-  PutRequest put = 4;
-  DeleteRangeRequest delete_range = 5;
-  TxnRequest txn = 6;
-  CompactionRequest compaction = 7;
-
-  LeaseGrantRequest lease_grant = 8;
-  LeaseRevokeRequest lease_revoke = 9;
-
-  AlarmRequest alarm = 10;
-
-  AuthEnableRequest auth_enable = 1000;
-  AuthDisableRequest auth_disable = 1011;
-
-  InternalAuthenticateRequest authenticate = 1012;
-
-  AuthUserAddRequest auth_user_add = 1100;
-  AuthUserDeleteRequest auth_user_delete = 1101;
-  AuthUserGetRequest auth_user_get = 1102;
-  AuthUserChangePasswordRequest auth_user_change_password = 1103;
-  AuthUserGrantRoleRequest auth_user_grant_role = 1104;
-  AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
-  AuthUserListRequest auth_user_list = 1106;
-  AuthRoleListRequest auth_role_list = 1107;
-
-  AuthRoleAddRequest auth_role_add = 1200;
-  AuthRoleDeleteRequest auth_role_delete = 1201;
-  AuthRoleGetRequest auth_role_get = 1202;
-  AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
-  AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
-}
-
-message EmptyResponse {
-}
-
-// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
-// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
-// To avoid misuse of the field, we have an internal version of AuthenticateRequest.
-message InternalAuthenticateRequest {
-  string name = 1;
-  string password = 2;
-
-  // simple_token is generated in API layer (etcdserver/v3_server.go)
-  string simple_token = 3;
-}
-
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
deleted file mode 100644
index 31e121e..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserverpb
-
-import (
-	"fmt"
-	"strings"
-
-	proto "github.com/golang/protobuf/proto"
-)
-
-// InternalRaftStringer implements custom proto Stringer:
-// redact password, replace value fields with value_size fields.
-type InternalRaftStringer struct {
-	Request *InternalRaftRequest
-}
-
-func (as *InternalRaftStringer) String() string {
-	switch {
-	case as.Request.LeaseGrant != nil:
-		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
-			as.Request.Header.String(),
-			as.Request.LeaseGrant.TTL,
-			as.Request.LeaseGrant.ID,
-		)
-	case as.Request.LeaseRevoke != nil:
-		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
-			as.Request.Header.String(),
-			as.Request.LeaseRevoke.ID,
-		)
-	case as.Request.Authenticate != nil:
-		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
-			as.Request.Header.String(),
-			as.Request.Authenticate.Name,
-			as.Request.Authenticate.SimpleToken,
-		)
-	case as.Request.AuthUserAdd != nil:
-		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
-			as.Request.Header.String(),
-			as.Request.AuthUserAdd.Name,
-		)
-	case as.Request.AuthUserChangePassword != nil:
-		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
-			as.Request.Header.String(),
-			as.Request.AuthUserChangePassword.Name,
-		)
-	case as.Request.Put != nil:
-		return fmt.Sprintf("header:<%s> put:<%s>",
-			as.Request.Header.String(),
-			NewLoggablePutRequest(as.Request.Put).String(),
-		)
-	case as.Request.Txn != nil:
-		return fmt.Sprintf("header:<%s> txn:<%s>",
-			as.Request.Header.String(),
-			NewLoggableTxnRequest(as.Request.Txn).String(),
-		)
-	default:
-		// nothing to redact
-	}
-	return as.Request.String()
-}
-
-// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
-// fields in any nested txn and put operations.
-type txnRequestStringer struct {
-	Request *TxnRequest
-}
-
-func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
-	return &txnRequestStringer{request}
-}
-
-func (as *txnRequestStringer) String() string {
-	var compare []string
-	for _, c := range as.Request.Compare {
-		switch cv := c.TargetUnion.(type) {
-		case *Compare_Value:
-			compare = append(compare, newLoggableValueCompare(c, cv).String())
-		default:
-			// nothing to redact
-			compare = append(compare, c.String())
-		}
-	}
-	var success []string
-	for _, s := range as.Request.Success {
-		success = append(success, newLoggableRequestOp(s).String())
-	}
-	var failure []string
-	for _, f := range as.Request.Failure {
-		failure = append(failure, newLoggableRequestOp(f).String())
-	}
-	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
-		strings.Join(compare, " "),
-		strings.Join(success, " "),
-		strings.Join(failure, " "),
-	)
-}
-
-// requestOpStringer implements a custom proto String to replace value bytes fields with value
-// size fields in any nested txn and put operations.
-type requestOpStringer struct {
-	Op *RequestOp
-}
-
-func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
-	return &requestOpStringer{op}
-}
-
-func (as *requestOpStringer) String() string {
-	switch op := as.Op.Request.(type) {
-	case *RequestOp_RequestPut:
-		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
-	case *RequestOp_RequestTxn:
-		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
-	default:
-		// nothing to redact
-	}
-	return as.Op.String()
-}
-
-// loggableValueCompare implements a custom proto String for Compare.Value union member types to
-// replace the value bytes field with a value size field.
-// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
-type loggableValueCompare struct {
-	Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
-	Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
-	Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
-	ValueSize int64                 `protobuf:"varint,7,opt,name=value_size,proto3"`
-	RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
-}
-
-func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
-	return &loggableValueCompare{
-		c.Result,
-		c.Target,
-		c.Key,
-		int64(len(cv.Value)),
-		c.RangeEnd,
-	}
-}
-
-func (m *loggableValueCompare) Reset()         { *m = loggableValueCompare{} }
-func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
-func (*loggableValueCompare) ProtoMessage()    {}
-
-// loggablePutRequest implements a custom proto String to replace value bytes field with a value
-// size field.
-// To preserve proto encoding of the key bytes, a faked out proto type is used here.
-type loggablePutRequest struct {
-	Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
-	ValueSize   int64  `protobuf:"varint,2,opt,name=value_size,proto3"`
-	Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
-	PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
-	IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
-	IgnoreLease bool   `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
-}
-
-func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
-	return &loggablePutRequest{
-		request.Key,
-		int64(len(request.Value)),
-		request.Lease,
-		request.PrevKv,
-		request.IgnoreValue,
-		request.IgnoreLease,
-	}
-}
-
-func (m *loggablePutRequest) Reset()         { *m = loggablePutRequest{} }
-func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
-func (*loggablePutRequest) ProtoMessage()    {}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
deleted file mode 100644
index a0cff8f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
+++ /dev/null
@@ -1,24010 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: rpc.proto
-
-package etcdserverpb
-
-import (
-	context "context"
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	authpb "github.com/coreos/etcd/auth/authpb"
-	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-	_ "google.golang.org/genproto/googleapis/api/annotations"
-	grpc "google.golang.org/grpc"
-	codes "google.golang.org/grpc/codes"
-	status "google.golang.org/grpc/status"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type AlarmType int32
-
-const (
-	AlarmType_NONE    AlarmType = 0
-	AlarmType_NOSPACE AlarmType = 1
-	AlarmType_CORRUPT AlarmType = 2
-)
-
-var AlarmType_name = map[int32]string{
-	0: "NONE",
-	1: "NOSPACE",
-	2: "CORRUPT",
-}
-
-var AlarmType_value = map[string]int32{
-	"NONE":    0,
-	"NOSPACE": 1,
-	"CORRUPT": 2,
-}
-
-func (x AlarmType) String() string {
-	return proto.EnumName(AlarmType_name, int32(x))
-}
-
-func (AlarmType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{0}
-}
-
-type RangeRequest_SortOrder int32
-
-const (
-	RangeRequest_NONE    RangeRequest_SortOrder = 0
-	RangeRequest_ASCEND  RangeRequest_SortOrder = 1
-	RangeRequest_DESCEND RangeRequest_SortOrder = 2
-)
-
-var RangeRequest_SortOrder_name = map[int32]string{
-	0: "NONE",
-	1: "ASCEND",
-	2: "DESCEND",
-}
-
-var RangeRequest_SortOrder_value = map[string]int32{
-	"NONE":    0,
-	"ASCEND":  1,
-	"DESCEND": 2,
-}
-
-func (x RangeRequest_SortOrder) String() string {
-	return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
-}
-
-func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{1, 0}
-}
-
-type RangeRequest_SortTarget int32
-
-const (
-	RangeRequest_KEY     RangeRequest_SortTarget = 0
-	RangeRequest_VERSION RangeRequest_SortTarget = 1
-	RangeRequest_CREATE  RangeRequest_SortTarget = 2
-	RangeRequest_MOD     RangeRequest_SortTarget = 3
-	RangeRequest_VALUE   RangeRequest_SortTarget = 4
-)
-
-var RangeRequest_SortTarget_name = map[int32]string{
-	0: "KEY",
-	1: "VERSION",
-	2: "CREATE",
-	3: "MOD",
-	4: "VALUE",
-}
-
-var RangeRequest_SortTarget_value = map[string]int32{
-	"KEY":     0,
-	"VERSION": 1,
-	"CREATE":  2,
-	"MOD":     3,
-	"VALUE":   4,
-}
-
-func (x RangeRequest_SortTarget) String() string {
-	return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
-}
-
-func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{1, 1}
-}
-
-type Compare_CompareResult int32
-
-const (
-	Compare_EQUAL     Compare_CompareResult = 0
-	Compare_GREATER   Compare_CompareResult = 1
-	Compare_LESS      Compare_CompareResult = 2
-	Compare_NOT_EQUAL Compare_CompareResult = 3
-)
-
-var Compare_CompareResult_name = map[int32]string{
-	0: "EQUAL",
-	1: "GREATER",
-	2: "LESS",
-	3: "NOT_EQUAL",
-}
-
-var Compare_CompareResult_value = map[string]int32{
-	"EQUAL":     0,
-	"GREATER":   1,
-	"LESS":      2,
-	"NOT_EQUAL": 3,
-}
-
-func (x Compare_CompareResult) String() string {
-	return proto.EnumName(Compare_CompareResult_name, int32(x))
-}
-
-func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{9, 0}
-}
-
-type Compare_CompareTarget int32
-
-const (
-	Compare_VERSION Compare_CompareTarget = 0
-	Compare_CREATE  Compare_CompareTarget = 1
-	Compare_MOD     Compare_CompareTarget = 2
-	Compare_VALUE   Compare_CompareTarget = 3
-	Compare_LEASE   Compare_CompareTarget = 4
-)
-
-var Compare_CompareTarget_name = map[int32]string{
-	0: "VERSION",
-	1: "CREATE",
-	2: "MOD",
-	3: "VALUE",
-	4: "LEASE",
-}
-
-var Compare_CompareTarget_value = map[string]int32{
-	"VERSION": 0,
-	"CREATE":  1,
-	"MOD":     2,
-	"VALUE":   3,
-	"LEASE":   4,
-}
-
-func (x Compare_CompareTarget) String() string {
-	return proto.EnumName(Compare_CompareTarget_name, int32(x))
-}
-
-func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{9, 1}
-}
-
-type WatchCreateRequest_FilterType int32
-
-const (
-	// filter out put event.
-	WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
-	// filter out delete event.
-	WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
-)
-
-var WatchCreateRequest_FilterType_name = map[int32]string{
-	0: "NOPUT",
-	1: "NODELETE",
-}
-
-var WatchCreateRequest_FilterType_value = map[string]int32{
-	"NOPUT":    0,
-	"NODELETE": 1,
-}
-
-func (x WatchCreateRequest_FilterType) String() string {
-	return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
-}
-
-func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
-}
-
-type AlarmRequest_AlarmAction int32
-
-const (
-	AlarmRequest_GET        AlarmRequest_AlarmAction = 0
-	AlarmRequest_ACTIVATE   AlarmRequest_AlarmAction = 1
-	AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
-)
-
-var AlarmRequest_AlarmAction_name = map[int32]string{
-	0: "GET",
-	1: "ACTIVATE",
-	2: "DEACTIVATE",
-}
-
-var AlarmRequest_AlarmAction_value = map[string]int32{
-	"GET":        0,
-	"ACTIVATE":   1,
-	"DEACTIVATE": 2,
-}
-
-func (x AlarmRequest_AlarmAction) String() string {
-	return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
-}
-
-func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{49, 0}
-}
-
-type ResponseHeader struct {
-	// cluster_id is the ID of the cluster which sent the response.
-	ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
-	// member_id is the ID of the member which sent the response.
-	MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
-	// revision is the key-value store revision when the request was applied.
-	// For watch progress responses, the header.revision indicates progress. All future events
-	// received in this stream are guaranteed to have a higher revision number than the
-	// header.revision number.
-	Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
-	// raft_term is the raft term when the request was applied.
-	RaftTerm             uint64   `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *ResponseHeader) Reset()         { *m = ResponseHeader{} }
-func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
-func (*ResponseHeader) ProtoMessage()    {}
-func (*ResponseHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{0}
-}
-func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ResponseHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ResponseHeader.Merge(m, src)
-}
-func (m *ResponseHeader) XXX_Size() int {
-	return m.Size()
-}
-func (m *ResponseHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
-
-func (m *ResponseHeader) GetClusterId() uint64 {
-	if m != nil {
-		return m.ClusterId
-	}
-	return 0
-}
-
-func (m *ResponseHeader) GetMemberId() uint64 {
-	if m != nil {
-		return m.MemberId
-	}
-	return 0
-}
-
-func (m *ResponseHeader) GetRevision() int64 {
-	if m != nil {
-		return m.Revision
-	}
-	return 0
-}
-
-func (m *ResponseHeader) GetRaftTerm() uint64 {
-	if m != nil {
-		return m.RaftTerm
-	}
-	return 0
-}
-
-type RangeRequest struct {
-	// key is the first key for the range. If range_end is not given, the request only looks up key.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// range_end is the upper bound on the requested range [key, range_end).
-	// If range_end is '\0', the range is all keys >= key.
-	// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
-	// then the range request gets all keys prefixed with key.
-	// If both key and range_end are '\0', then the range request returns all keys.
-	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	// limit is a limit on the number of keys returned for the request. When limit is set to 0,
-	// it is treated as no limit.
-	Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
-	// revision is the point-in-time of the key-value store to use for the range.
-	// If revision is less or equal to zero, the range is over the newest key-value store.
-	// If the revision has been compacted, ErrCompacted is returned as a response.
-	Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
-	// sort_order is the order for returned sorted results.
-	SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
-	// sort_target is the key-value field to use for sorting.
-	SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
-	// serializable sets the range request to use serializable member-local reads.
-	// Range requests are linearizable by default; linearizable requests have higher
-	// latency and lower throughput than serializable requests but reflect the current
-	// consensus of the cluster. For better performance, in exchange for possible stale reads,
-	// a serializable range request is served locally without needing to reach consensus
-	// with other nodes in the cluster.
-	Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
-	// keys_only when set returns only the keys and not the values.
-	KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
-	// count_only when set returns only the count of the keys in the range.
-	CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
-	// min_mod_revision is the lower bound for returned key mod revisions; all keys with
-	// lesser mod revisions will be filtered away.
-	MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
-	// max_mod_revision is the upper bound for returned key mod revisions; all keys with
-	// greater mod revisions will be filtered away.
-	MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
-	// min_create_revision is the lower bound for returned key create revisions; all keys with
-	// lesser create trevisions will be filtered away.
-	// lesser create revisions will be filtered away.
-	// max_create_revision is the upper bound for returned key create revisions; all keys with
-	// greater create revisions will be filtered away.
-	MaxCreateRevision    int64    `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *RangeRequest) Reset()         { *m = RangeRequest{} }
-func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
-func (*RangeRequest) ProtoMessage()    {}
-func (*RangeRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{1}
-}
-func (m *RangeRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *RangeRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RangeRequest.Merge(m, src)
-}
-func (m *RangeRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *RangeRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_RangeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RangeRequest proto.InternalMessageInfo
-
-func (m *RangeRequest) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *RangeRequest) GetRangeEnd() []byte {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return nil
-}
-
-func (m *RangeRequest) GetLimit() int64 {
-	if m != nil {
-		return m.Limit
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetRevision() int64 {
-	if m != nil {
-		return m.Revision
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
-	if m != nil {
-		return m.SortOrder
-	}
-	return RangeRequest_NONE
-}
-
-func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
-	if m != nil {
-		return m.SortTarget
-	}
-	return RangeRequest_KEY
-}
-
-func (m *RangeRequest) GetSerializable() bool {
-	if m != nil {
-		return m.Serializable
-	}
-	return false
-}
-
-func (m *RangeRequest) GetKeysOnly() bool {
-	if m != nil {
-		return m.KeysOnly
-	}
-	return false
-}
-
-func (m *RangeRequest) GetCountOnly() bool {
-	if m != nil {
-		return m.CountOnly
-	}
-	return false
-}
-
-func (m *RangeRequest) GetMinModRevision() int64 {
-	if m != nil {
-		return m.MinModRevision
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetMaxModRevision() int64 {
-	if m != nil {
-		return m.MaxModRevision
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetMinCreateRevision() int64 {
-	if m != nil {
-		return m.MinCreateRevision
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetMaxCreateRevision() int64 {
-	if m != nil {
-		return m.MaxCreateRevision
-	}
-	return 0
-}
-
-type RangeResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// kvs is the list of key-value pairs matched by the range request.
-	// kvs is empty when count is requested.
-	Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"`
-	// more indicates if there are more keys to return in the requested range.
-	More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
-	// count is set to the number of keys within the range when requested.
-	Count                int64    `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *RangeResponse) Reset()         { *m = RangeResponse{} }
-func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
-func (*RangeResponse) ProtoMessage()    {}
-func (*RangeResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{2}
-}
-func (m *RangeResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *RangeResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RangeResponse.Merge(m, src)
-}
-func (m *RangeResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *RangeResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_RangeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RangeResponse proto.InternalMessageInfo
-
-func (m *RangeResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
-	if m != nil {
-		return m.Kvs
-	}
-	return nil
-}
-
-func (m *RangeResponse) GetMore() bool {
-	if m != nil {
-		return m.More
-	}
-	return false
-}
-
-func (m *RangeResponse) GetCount() int64 {
-	if m != nil {
-		return m.Count
-	}
-	return 0
-}
-
-type PutRequest struct {
-	// key is the key, in bytes, to put into the key-value store.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// value is the value, in bytes, to associate with the key in the key-value store.
-	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-	// lease is the lease ID to associate with the key in the key-value store. A lease
-	// value of 0 indicates no lease.
-	Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
-	// If prev_kv is set, etcd gets the previous key-value pair before changing it.
-	// The previous key-value pair will be returned in the put response.
-	PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	// If ignore_value is set, etcd updates the key using its current value.
-	// Returns an error if the key does not exist.
-	IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
-	// If ignore_lease is set, etcd updates the key using its current lease.
-	// Returns an error if the key does not exist.
-	IgnoreLease          bool     `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PutRequest) Reset()         { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage()    {}
-func (*PutRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{3}
-}
-func (m *PutRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *PutRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PutRequest.Merge(m, src)
-}
-func (m *PutRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *PutRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_PutRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutRequest proto.InternalMessageInfo
-
-func (m *PutRequest) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *PutRequest) GetValue() []byte {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-func (m *PutRequest) GetLease() int64 {
-	if m != nil {
-		return m.Lease
-	}
-	return 0
-}
-
-func (m *PutRequest) GetPrevKv() bool {
-	if m != nil {
-		return m.PrevKv
-	}
-	return false
-}
-
-func (m *PutRequest) GetIgnoreValue() bool {
-	if m != nil {
-		return m.IgnoreValue
-	}
-	return false
-}
-
-func (m *PutRequest) GetIgnoreLease() bool {
-	if m != nil {
-		return m.IgnoreLease
-	}
-	return false
-}
-
-type PutResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// if prev_kv is set in the request, the previous key-value pair will be returned.
-	PrevKv               *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
-	XXX_unrecognized     []byte           `json:"-"`
-	XXX_sizecache        int32            `json:"-"`
-}
-
-func (m *PutResponse) Reset()         { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage()    {}
-func (*PutResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{4}
-}
-func (m *PutResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *PutResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PutResponse.Merge(m, src)
-}
-func (m *PutResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *PutResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_PutResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutResponse proto.InternalMessageInfo
-
-func (m *PutResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
-	if m != nil {
-		return m.PrevKv
-	}
-	return nil
-}
-
-type DeleteRangeRequest struct {
-	// key is the first key to delete in the range.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// range_end is the key following the last key to delete for the range [key, range_end).
-	// If range_end is not given, the range is defined to contain only the key argument.
-	// If range_end is one bit larger than the given key, then the range is all the keys
-	// with the prefix (the given key).
-	// If range_end is '\0', the range is all keys greater than or equal to the key argument.
-	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	// If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
-	// The previous key-value pairs will be returned in the delete response.
-	PrevKv               bool     `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DeleteRangeRequest) Reset()         { *m = DeleteRangeRequest{} }
-func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRangeRequest) ProtoMessage()    {}
-func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{5}
-}
-func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DeleteRangeRequest.Merge(m, src)
-}
-func (m *DeleteRangeRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
-
-func (m *DeleteRangeRequest) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *DeleteRangeRequest) GetRangeEnd() []byte {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return nil
-}
-
-func (m *DeleteRangeRequest) GetPrevKv() bool {
-	if m != nil {
-		return m.PrevKv
-	}
-	return false
-}
-
-type DeleteRangeResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// deleted is the number of keys deleted by the delete range request.
-	Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
-	// if prev_kv is set in the request, the previous key-value pairs will be returned.
-	PrevKvs              []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
-	XXX_unrecognized     []byte             `json:"-"`
-	XXX_sizecache        int32              `json:"-"`
-}
-
-func (m *DeleteRangeResponse) Reset()         { *m = DeleteRangeResponse{} }
-func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteRangeResponse) ProtoMessage()    {}
-func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{6}
-}
-func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DeleteRangeResponse.Merge(m, src)
-}
-func (m *DeleteRangeResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
-
-func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *DeleteRangeResponse) GetDeleted() int64 {
-	if m != nil {
-		return m.Deleted
-	}
-	return 0
-}
-
-func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
-	if m != nil {
-		return m.PrevKvs
-	}
-	return nil
-}
-
-type RequestOp struct {
-	// request is a union of request types accepted by a transaction.
-	//
-	// Types that are valid to be assigned to Request:
-	//	*RequestOp_RequestRange
-	//	*RequestOp_RequestPut
-	//	*RequestOp_RequestDeleteRange
-	//	*RequestOp_RequestTxn
-	Request              isRequestOp_Request `protobuf_oneof:"request"`
-	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
-	XXX_unrecognized     []byte              `json:"-"`
-	XXX_sizecache        int32               `json:"-"`
-}
-
-func (m *RequestOp) Reset()         { *m = RequestOp{} }
-func (m *RequestOp) String() string { return proto.CompactTextString(m) }
-func (*RequestOp) ProtoMessage()    {}
-func (*RequestOp) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{7}
-}
-func (m *RequestOp) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *RequestOp) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RequestOp.Merge(m, src)
-}
-func (m *RequestOp) XXX_Size() int {
-	return m.Size()
-}
-func (m *RequestOp) XXX_DiscardUnknown() {
-	xxx_messageInfo_RequestOp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestOp proto.InternalMessageInfo
-
-type isRequestOp_Request interface {
-	isRequestOp_Request()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type RequestOp_RequestRange struct {
-	RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof"`
-}
-type RequestOp_RequestPut struct {
-	RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof"`
-}
-type RequestOp_RequestDeleteRange struct {
-	RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof"`
-}
-type RequestOp_RequestTxn struct {
-	RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof"`
-}
-
-func (*RequestOp_RequestRange) isRequestOp_Request()       {}
-func (*RequestOp_RequestPut) isRequestOp_Request()         {}
-func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
-func (*RequestOp_RequestTxn) isRequestOp_Request()         {}
-
-func (m *RequestOp) GetRequest() isRequestOp_Request {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-func (m *RequestOp) GetRequestRange() *RangeRequest {
-	if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
-		return x.RequestRange
-	}
-	return nil
-}
-
-func (m *RequestOp) GetRequestPut() *PutRequest {
-	if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
-		return x.RequestPut
-	}
-	return nil
-}
-
-func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
-	if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
-		return x.RequestDeleteRange
-	}
-	return nil
-}
-
-func (m *RequestOp) GetRequestTxn() *TxnRequest {
-	if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
-		return x.RequestTxn
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{
-		(*RequestOp_RequestRange)(nil),
-		(*RequestOp_RequestPut)(nil),
-		(*RequestOp_RequestDeleteRange)(nil),
-		(*RequestOp_RequestTxn)(nil),
-	}
-}
-
-func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*RequestOp)
-	// request
-	switch x := m.Request.(type) {
-	case *RequestOp_RequestRange:
-		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.RequestRange); err != nil {
-			return err
-		}
-	case *RequestOp_RequestPut:
-		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.RequestPut); err != nil {
-			return err
-		}
-	case *RequestOp_RequestDeleteRange:
-		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.RequestDeleteRange); err != nil {
-			return err
-		}
-	case *RequestOp_RequestTxn:
-		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.RequestTxn); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("RequestOp.Request has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*RequestOp)
-	switch tag {
-	case 1: // request.request_range
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(RangeRequest)
-		err := b.DecodeMessage(msg)
-		m.Request = &RequestOp_RequestRange{msg}
-		return true, err
-	case 2: // request.request_put
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(PutRequest)
-		err := b.DecodeMessage(msg)
-		m.Request = &RequestOp_RequestPut{msg}
-		return true, err
-	case 3: // request.request_delete_range
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(DeleteRangeRequest)
-		err := b.DecodeMessage(msg)
-		m.Request = &RequestOp_RequestDeleteRange{msg}
-		return true, err
-	case 4: // request.request_txn
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(TxnRequest)
-		err := b.DecodeMessage(msg)
-		m.Request = &RequestOp_RequestTxn{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _RequestOp_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*RequestOp)
-	// request
-	switch x := m.Request.(type) {
-	case *RequestOp_RequestRange:
-		s := proto.Size(x.RequestRange)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *RequestOp_RequestPut:
-		s := proto.Size(x.RequestPut)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *RequestOp_RequestDeleteRange:
-		s := proto.Size(x.RequestDeleteRange)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *RequestOp_RequestTxn:
-		s := proto.Size(x.RequestTxn)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
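The RequestOp message above is the oneof wrapper used for transaction operands: exactly one of the four wrapper structs may be assigned to Request, and the typed getters return nil when another member is set. A minimal usage sketch follows; the pb import path is an assumption and depends on which etcdserverpb package this vendor tree exposes.

package main

import (
	"fmt"

	// Assumed import path; older vendored trees may use github.com/coreos/etcd/etcdserverpb instead.
	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

func main() {
	// Exactly one oneof wrapper may be assigned to Request at a time.
	op := &pb.RequestOp{
		Request: &pb.RequestOp_RequestPut{
			RequestPut: &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")},
		},
	}

	// Generated getters return nil for oneof members that are not set.
	fmt.Println(op.GetRequestPut() != nil)   // true
	fmt.Println(op.GetRequestRange() == nil) // true
}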
-
-type ResponseOp struct {
-	// response is a union of response types returned by a transaction.
-	//
-	// Types that are valid to be assigned to Response:
-	//	*ResponseOp_ResponseRange
-	//	*ResponseOp_ResponsePut
-	//	*ResponseOp_ResponseDeleteRange
-	//	*ResponseOp_ResponseTxn
-	Response             isResponseOp_Response `protobuf_oneof:"response"`
-	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
-	XXX_unrecognized     []byte                `json:"-"`
-	XXX_sizecache        int32                 `json:"-"`
-}
-
-func (m *ResponseOp) Reset()         { *m = ResponseOp{} }
-func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
-func (*ResponseOp) ProtoMessage()    {}
-func (*ResponseOp) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{8}
-}
-func (m *ResponseOp) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ResponseOp) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ResponseOp.Merge(m, src)
-}
-func (m *ResponseOp) XXX_Size() int {
-	return m.Size()
-}
-func (m *ResponseOp) XXX_DiscardUnknown() {
-	xxx_messageInfo_ResponseOp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseOp proto.InternalMessageInfo
-
-type isResponseOp_Response interface {
-	isResponseOp_Response()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type ResponseOp_ResponseRange struct {
-	ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof"`
-}
-type ResponseOp_ResponsePut struct {
-	ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof"`
-}
-type ResponseOp_ResponseDeleteRange struct {
-	ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof"`
-}
-type ResponseOp_ResponseTxn struct {
-	ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof"`
-}
-
-func (*ResponseOp_ResponseRange) isResponseOp_Response()       {}
-func (*ResponseOp_ResponsePut) isResponseOp_Response()         {}
-func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
-func (*ResponseOp_ResponseTxn) isResponseOp_Response()         {}
-
-func (m *ResponseOp) GetResponse() isResponseOp_Response {
-	if m != nil {
-		return m.Response
-	}
-	return nil
-}
-
-func (m *ResponseOp) GetResponseRange() *RangeResponse {
-	if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
-		return x.ResponseRange
-	}
-	return nil
-}
-
-func (m *ResponseOp) GetResponsePut() *PutResponse {
-	if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
-		return x.ResponsePut
-	}
-	return nil
-}
-
-func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
-	if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
-		return x.ResponseDeleteRange
-	}
-	return nil
-}
-
-func (m *ResponseOp) GetResponseTxn() *TxnResponse {
-	if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
-		return x.ResponseTxn
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{
-		(*ResponseOp_ResponseRange)(nil),
-		(*ResponseOp_ResponsePut)(nil),
-		(*ResponseOp_ResponseDeleteRange)(nil),
-		(*ResponseOp_ResponseTxn)(nil),
-	}
-}
-
-func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*ResponseOp)
-	// response
-	switch x := m.Response.(type) {
-	case *ResponseOp_ResponseRange:
-		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ResponseRange); err != nil {
-			return err
-		}
-	case *ResponseOp_ResponsePut:
-		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ResponsePut); err != nil {
-			return err
-		}
-	case *ResponseOp_ResponseDeleteRange:
-		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil {
-			return err
-		}
-	case *ResponseOp_ResponseTxn:
-		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ResponseTxn); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("ResponseOp.Response has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*ResponseOp)
-	switch tag {
-	case 1: // response.response_range
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(RangeResponse)
-		err := b.DecodeMessage(msg)
-		m.Response = &ResponseOp_ResponseRange{msg}
-		return true, err
-	case 2: // response.response_put
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(PutResponse)
-		err := b.DecodeMessage(msg)
-		m.Response = &ResponseOp_ResponsePut{msg}
-		return true, err
-	case 3: // response.response_delete_range
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(DeleteRangeResponse)
-		err := b.DecodeMessage(msg)
-		m.Response = &ResponseOp_ResponseDeleteRange{msg}
-		return true, err
-	case 4: // response.response_txn
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(TxnResponse)
-		err := b.DecodeMessage(msg)
-		m.Response = &ResponseOp_ResponseTxn{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _ResponseOp_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*ResponseOp)
-	// response
-	switch x := m.Response.(type) {
-	case *ResponseOp_ResponseRange:
-		s := proto.Size(x.ResponseRange)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ResponseOp_ResponsePut:
-		s := proto.Size(x.ResponsePut)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ResponseOp_ResponseDeleteRange:
-		s := proto.Size(x.ResponseDeleteRange)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ResponseOp_ResponseTxn:
-		s := proto.Size(x.ResponseTxn)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-type Compare struct {
-	// result is logical comparison operation for this comparison.
-	Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
-	// target is the key-value field to inspect for the comparison.
-	Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
-	// key is the subject key for the comparison operation.
-	Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
-	// Types that are valid to be assigned to TargetUnion:
-	//	*Compare_Version
-	//	*Compare_CreateRevision
-	//	*Compare_ModRevision
-	//	*Compare_Value
-	//	*Compare_Lease
-	TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
-	// range_end compares the given target to all keys in the range [key, range_end).
-	// See RangeRequest for more details on key ranges.
-	RangeEnd             []byte   `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Compare) Reset()         { *m = Compare{} }
-func (m *Compare) String() string { return proto.CompactTextString(m) }
-func (*Compare) ProtoMessage()    {}
-func (*Compare) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{9}
-}
-func (m *Compare) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Compare.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Compare) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Compare.Merge(m, src)
-}
-func (m *Compare) XXX_Size() int {
-	return m.Size()
-}
-func (m *Compare) XXX_DiscardUnknown() {
-	xxx_messageInfo_Compare.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Compare proto.InternalMessageInfo
-
-type isCompare_TargetUnion interface {
-	isCompare_TargetUnion()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type Compare_Version struct {
-	Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"`
-}
-type Compare_CreateRevision struct {
-	CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"`
-}
-type Compare_ModRevision struct {
-	ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"`
-}
-type Compare_Value struct {
-	Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"`
-}
-type Compare_Lease struct {
-	Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"`
-}
-
-func (*Compare_Version) isCompare_TargetUnion()        {}
-func (*Compare_CreateRevision) isCompare_TargetUnion() {}
-func (*Compare_ModRevision) isCompare_TargetUnion()    {}
-func (*Compare_Value) isCompare_TargetUnion()          {}
-func (*Compare_Lease) isCompare_TargetUnion()          {}
-
-func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
-	if m != nil {
-		return m.TargetUnion
-	}
-	return nil
-}
-
-func (m *Compare) GetResult() Compare_CompareResult {
-	if m != nil {
-		return m.Result
-	}
-	return Compare_EQUAL
-}
-
-func (m *Compare) GetTarget() Compare_CompareTarget {
-	if m != nil {
-		return m.Target
-	}
-	return Compare_VERSION
-}
-
-func (m *Compare) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *Compare) GetVersion() int64 {
-	if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
-		return x.Version
-	}
-	return 0
-}
-
-func (m *Compare) GetCreateRevision() int64 {
-	if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
-		return x.CreateRevision
-	}
-	return 0
-}
-
-func (m *Compare) GetModRevision() int64 {
-	if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
-		return x.ModRevision
-	}
-	return 0
-}
-
-func (m *Compare) GetValue() []byte {
-	if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
-		return x.Value
-	}
-	return nil
-}
-
-func (m *Compare) GetLease() int64 {
-	if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
-		return x.Lease
-	}
-	return 0
-}
-
-func (m *Compare) GetRangeEnd() []byte {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{
-		(*Compare_Version)(nil),
-		(*Compare_CreateRevision)(nil),
-		(*Compare_ModRevision)(nil),
-		(*Compare_Value)(nil),
-		(*Compare_Lease)(nil),
-	}
-}
-
-func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*Compare)
-	// target_union
-	switch x := m.TargetUnion.(type) {
-	case *Compare_Version:
-		_ = b.EncodeVarint(4<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(uint64(x.Version))
-	case *Compare_CreateRevision:
-		_ = b.EncodeVarint(5<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(uint64(x.CreateRevision))
-	case *Compare_ModRevision:
-		_ = b.EncodeVarint(6<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(uint64(x.ModRevision))
-	case *Compare_Value:
-		_ = b.EncodeVarint(7<<3 | proto.WireBytes)
-		_ = b.EncodeRawBytes(x.Value)
-	case *Compare_Lease:
-		_ = b.EncodeVarint(8<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(uint64(x.Lease))
-	case nil:
-	default:
-		return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*Compare)
-	switch tag {
-	case 4: // target_union.version
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.TargetUnion = &Compare_Version{int64(x)}
-		return true, err
-	case 5: // target_union.create_revision
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.TargetUnion = &Compare_CreateRevision{int64(x)}
-		return true, err
-	case 6: // target_union.mod_revision
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.TargetUnion = &Compare_ModRevision{int64(x)}
-		return true, err
-	case 7: // target_union.value
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeRawBytes(true)
-		m.TargetUnion = &Compare_Value{x}
-		return true, err
-	case 8: // target_union.lease
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.TargetUnion = &Compare_Lease{int64(x)}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _Compare_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*Compare)
-	// target_union
-	switch x := m.TargetUnion.(type) {
-	case *Compare_Version:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(x.Version))
-	case *Compare_CreateRevision:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(x.CreateRevision))
-	case *Compare_ModRevision:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(x.ModRevision))
-	case *Compare_Value:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(len(x.Value)))
-		n += len(x.Value)
-	case *Compare_Lease:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(x.Lease))
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
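Compare uses the same oneof pattern for its target_union: the Target enum names which key-value field is inspected, and the matching wrapper carries the operand. A hedged sketch, reusing the pb alias from the earlier example:

// buildVersionCompare asserts that key is currently at the given version.
func buildVersionCompare(key []byte, version int64) *pb.Compare {
	return &pb.Compare{
		Result:      pb.Compare_EQUAL,   // logical comparison operation
		Target:      pb.Compare_VERSION, // inspect the key's version
		Key:         key,
		TargetUnion: &pb.Compare_Version{Version: version},
	}
}

GetVersion returns the operand only when Compare_Version is the assigned member; the other target getters return their zero values.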
-
-// From google paxosdb paper:
-// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
-// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
-// and consists of three components:
-// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
-// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
-// may apply to the same or different entries in the database. All tests in the guard are applied and
-// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
-// it executes f op (see item 3 below).
-// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
-// lookup operation, and applies to a single database entry. Two different operations in the list may apply
-// to the same or different entries in the database. These operations are executed if guard evaluates to true.
-// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
-type TxnRequest struct {
-	// compare is a list of predicates representing a conjunction of terms.
-	// If the comparisons succeed, then the success requests will be processed in order,
-	// and the response will contain their respective responses in order.
-	// If the comparisons fail, then the failure requests will be processed in order,
-	// and the response will contain their respective responses in order.
-	Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"`
-	// success is a list of requests which will be applied when compare evaluates to true.
-	Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"`
-	// failure is a list of requests which will be applied when compare evaluates to false.
-	Failure              []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
-	XXX_unrecognized     []byte       `json:"-"`
-	XXX_sizecache        int32        `json:"-"`
-}
-
-func (m *TxnRequest) Reset()         { *m = TxnRequest{} }
-func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
-func (*TxnRequest) ProtoMessage()    {}
-func (*TxnRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{10}
-}
-func (m *TxnRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *TxnRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TxnRequest.Merge(m, src)
-}
-func (m *TxnRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *TxnRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_TxnRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TxnRequest proto.InternalMessageInfo
-
-func (m *TxnRequest) GetCompare() []*Compare {
-	if m != nil {
-		return m.Compare
-	}
-	return nil
-}
-
-func (m *TxnRequest) GetSuccess() []*RequestOp {
-	if m != nil {
-		return m.Success
-	}
-	return nil
-}
-
-func (m *TxnRequest) GetFailure() []*RequestOp {
-	if m != nil {
-		return m.Failure
-	}
-	return nil
-}
-
-type TxnResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// succeeded is set to true if the compare evaluated to true, and false otherwise.
-	Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
-	// responses is a list of responses corresponding to the results from applying
-	// success if succeeded is true or failure if succeeded is false.
-	Responses            []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *TxnResponse) Reset()         { *m = TxnResponse{} }
-func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
-func (*TxnResponse) ProtoMessage()    {}
-func (*TxnResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{11}
-}
-func (m *TxnResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *TxnResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TxnResponse.Merge(m, src)
-}
-func (m *TxnResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *TxnResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_TxnResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TxnResponse proto.InternalMessageInfo
-
-func (m *TxnResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *TxnResponse) GetSucceeded() bool {
-	if m != nil {
-		return m.Succeeded
-	}
-	return false
-}
-
-func (m *TxnResponse) GetResponses() []*ResponseOp {
-	if m != nil {
-		return m.Responses
-	}
-	return nil
-}
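Putting the pieces together, a TxnRequest mirrors the MultiOp description above: Compare is the guard, Success is t op, Failure is f op, and the TxnResponse reports which branch ran. A hedged sketch, reusing buildVersionCompare and the pb alias from the earlier examples:

// casPut builds a compare-and-swap style txn: put only if key is at wantVersion.
func casPut(key, value []byte, wantVersion int64) *pb.TxnRequest {
	return &pb.TxnRequest{
		Compare: []*pb.Compare{buildVersionCompare(key, wantVersion)},
		Success: []*pb.RequestOp{{
			Request: &pb.RequestOp_RequestPut{
				RequestPut: &pb.PutRequest{Key: key, Value: value},
			},
		}},
		Failure: []*pb.RequestOp{{
			Request: &pb.RequestOp_RequestRange{
				RequestRange: &pb.RangeRequest{Key: key}, // read the current state instead
			},
		}},
	}
}

func txnApplied(resp *pb.TxnResponse) bool {
	// Responses line up, in order, with whichever branch was executed.
	return resp.GetSucceeded()
}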
-
-// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
-// with a revision less than the compaction revision will be removed.
-type CompactionRequest struct {
-	// revision is the key-value store revision for the compaction operation.
-	Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
-	// physical is set so the RPC will wait until the compaction is physically
-	// applied to the local database such that compacted entries are totally
-	// removed from the backend database.
-	Physical             bool     `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *CompactionRequest) Reset()         { *m = CompactionRequest{} }
-func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
-func (*CompactionRequest) ProtoMessage()    {}
-func (*CompactionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{12}
-}
-func (m *CompactionRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *CompactionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompactionRequest.Merge(m, src)
-}
-func (m *CompactionRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *CompactionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompactionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo
-
-func (m *CompactionRequest) GetRevision() int64 {
-	if m != nil {
-		return m.Revision
-	}
-	return 0
-}
-
-func (m *CompactionRequest) GetPhysical() bool {
-	if m != nil {
-		return m.Physical
-	}
-	return false
-}
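A CompactionRequest only needs the target revision; Physical controls whether the RPC returns before or after the backend has physically dropped the compacted entries. Illustrative only, with pb as above:

func compactTo(rev int64) *pb.CompactionRequest {
	return &pb.CompactionRequest{
		Revision: rev,
		Physical: true, // wait until compacted entries are removed from the backend database
	}
}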
-
-type CompactionResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *CompactionResponse) Reset()         { *m = CompactionResponse{} }
-func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
-func (*CompactionResponse) ProtoMessage()    {}
-func (*CompactionResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{13}
-}
-func (m *CompactionResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *CompactionResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompactionResponse.Merge(m, src)
-}
-func (m *CompactionResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *CompactionResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompactionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo
-
-func (m *CompactionResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type HashRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HashRequest) Reset()         { *m = HashRequest{} }
-func (m *HashRequest) String() string { return proto.CompactTextString(m) }
-func (*HashRequest) ProtoMessage()    {}
-func (*HashRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{14}
-}
-func (m *HashRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HashRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HashRequest.Merge(m, src)
-}
-func (m *HashRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *HashRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_HashRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashRequest proto.InternalMessageInfo
-
-type HashKVRequest struct {
-	// revision is the key-value store revision for the hash operation.
-	Revision             int64    `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HashKVRequest) Reset()         { *m = HashKVRequest{} }
-func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
-func (*HashKVRequest) ProtoMessage()    {}
-func (*HashKVRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{15}
-}
-func (m *HashKVRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HashKVRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HashKVRequest.Merge(m, src)
-}
-func (m *HashKVRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *HashKVRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_HashKVRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo
-
-func (m *HashKVRequest) GetRevision() int64 {
-	if m != nil {
-		return m.Revision
-	}
-	return 0
-}
-
-type HashKVResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// hash is the hash value computed from the responding member's MVCC keys up to a given revision.
-	Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
-	// compact_revision is the compacted revision of key-value store when hash begins.
-	CompactRevision      int64    `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HashKVResponse) Reset()         { *m = HashKVResponse{} }
-func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
-func (*HashKVResponse) ProtoMessage()    {}
-func (*HashKVResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{16}
-}
-func (m *HashKVResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HashKVResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HashKVResponse.Merge(m, src)
-}
-func (m *HashKVResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *HashKVResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_HashKVResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo
-
-func (m *HashKVResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *HashKVResponse) GetHash() uint32 {
-	if m != nil {
-		return m.Hash
-	}
-	return 0
-}
-
-func (m *HashKVResponse) GetCompactRevision() int64 {
-	if m != nil {
-		return m.CompactRevision
-	}
-	return 0
-}
-
-type HashResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// hash is the hash value computed from the responding member's KV's backend.
-	Hash                 uint32   `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HashResponse) Reset()         { *m = HashResponse{} }
-func (m *HashResponse) String() string { return proto.CompactTextString(m) }
-func (*HashResponse) ProtoMessage()    {}
-func (*HashResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{17}
-}
-func (m *HashResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HashResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HashResponse.Merge(m, src)
-}
-func (m *HashResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *HashResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_HashResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashResponse proto.InternalMessageInfo
-
-func (m *HashResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *HashResponse) GetHash() uint32 {
-	if m != nil {
-		return m.Hash
-	}
-	return 0
-}
-
-type SnapshotRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *SnapshotRequest) Reset()         { *m = SnapshotRequest{} }
-func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
-func (*SnapshotRequest) ProtoMessage()    {}
-func (*SnapshotRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{18}
-}
-func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SnapshotRequest.Merge(m, src)
-}
-func (m *SnapshotRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *SnapshotRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
-
-type SnapshotResponse struct {
-	// header has the current key-value store information. The first header in the snapshot
-	// stream indicates the point in time of the snapshot.
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// remaining_bytes is the number of blob bytes to be sent after this message
-	RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
-	// blob contains the next chunk of the snapshot in the snapshot stream.
-	Blob                 []byte   `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *SnapshotResponse) Reset()         { *m = SnapshotResponse{} }
-func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
-func (*SnapshotResponse) ProtoMessage()    {}
-func (*SnapshotResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{19}
-}
-func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *SnapshotResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SnapshotResponse.Merge(m, src)
-}
-func (m *SnapshotResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *SnapshotResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_SnapshotResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo
-
-func (m *SnapshotResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *SnapshotResponse) GetRemainingBytes() uint64 {
-	if m != nil {
-		return m.RemainingBytes
-	}
-	return 0
-}
-
-func (m *SnapshotResponse) GetBlob() []byte {
-	if m != nil {
-		return m.Blob
-	}
-	return nil
-}
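SnapshotResponse messages arrive as a stream: the first header marks the point in time of the snapshot, each message carries a Blob chunk, and RemainingBytes counts what is still to come. A sketch of reassembling the stream; recv is a hypothetical stand-in for whatever stream receiver the caller holds, and pb is as above:

func assembleSnapshot(recv func() (*pb.SnapshotResponse, error)) ([]byte, error) {
	var snap []byte
	for {
		resp, err := recv()
		if err != nil {
			return nil, err
		}
		snap = append(snap, resp.GetBlob()...)
		if resp.GetRemainingBytes() == 0 {
			return snap, nil // last chunk; no more blob bytes follow
		}
	}
}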
-
-type WatchRequest struct {
-	// request_union is a request to either create a new watcher or cancel an existing watcher.
-	//
-	// Types that are valid to be assigned to RequestUnion:
-	//	*WatchRequest_CreateRequest
-	//	*WatchRequest_CancelRequest
-	//	*WatchRequest_ProgressRequest
-	RequestUnion         isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
-	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
-	XXX_unrecognized     []byte                      `json:"-"`
-	XXX_sizecache        int32                       `json:"-"`
-}
-
-func (m *WatchRequest) Reset()         { *m = WatchRequest{} }
-func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchRequest) ProtoMessage()    {}
-func (*WatchRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{20}
-}
-func (m *WatchRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchRequest.Merge(m, src)
-}
-func (m *WatchRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchRequest proto.InternalMessageInfo
-
-type isWatchRequest_RequestUnion interface {
-	isWatchRequest_RequestUnion()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type WatchRequest_CreateRequest struct {
-	CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof"`
-}
-type WatchRequest_CancelRequest struct {
-	CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof"`
-}
-type WatchRequest_ProgressRequest struct {
-	ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof"`
-}
-
-func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion()   {}
-func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion()   {}
-func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
-
-func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
-	if m != nil {
-		return m.RequestUnion
-	}
-	return nil
-}
-
-func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
-	if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
-		return x.CreateRequest
-	}
-	return nil
-}
-
-func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
-	if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
-		return x.CancelRequest
-	}
-	return nil
-}
-
-func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
-	if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
-		return x.ProgressRequest
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{
-		(*WatchRequest_CreateRequest)(nil),
-		(*WatchRequest_CancelRequest)(nil),
-		(*WatchRequest_ProgressRequest)(nil),
-	}
-}
-
-func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*WatchRequest)
-	// request_union
-	switch x := m.RequestUnion.(type) {
-	case *WatchRequest_CreateRequest:
-		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.CreateRequest); err != nil {
-			return err
-		}
-	case *WatchRequest_CancelRequest:
-		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.CancelRequest); err != nil {
-			return err
-		}
-	case *WatchRequest_ProgressRequest:
-		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ProgressRequest); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*WatchRequest)
-	switch tag {
-	case 1: // request_union.create_request
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(WatchCreateRequest)
-		err := b.DecodeMessage(msg)
-		m.RequestUnion = &WatchRequest_CreateRequest{msg}
-		return true, err
-	case 2: // request_union.cancel_request
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(WatchCancelRequest)
-		err := b.DecodeMessage(msg)
-		m.RequestUnion = &WatchRequest_CancelRequest{msg}
-		return true, err
-	case 3: // request_union.progress_request
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(WatchProgressRequest)
-		err := b.DecodeMessage(msg)
-		m.RequestUnion = &WatchRequest_ProgressRequest{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _WatchRequest_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*WatchRequest)
-	// request_union
-	switch x := m.RequestUnion.(type) {
-	case *WatchRequest_CreateRequest:
-		s := proto.Size(x.CreateRequest)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *WatchRequest_CancelRequest:
-		s := proto.Size(x.CancelRequest)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *WatchRequest_ProgressRequest:
-		s := proto.Size(x.ProgressRequest)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-type WatchCreateRequest struct {
-	// key is the key to register for watching.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// range_end is the end of the range [key, range_end) to watch. If range_end is not given,
-	// only the key argument is watched. If range_end is equal to '\0', all keys greater than
-	// or equal to the key argument are watched.
-	// If the range_end is one bit larger than the given key,
-	// then all keys with the prefix (the given key) will be watched.
-	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	// start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
-	StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
-	// progress_notify is set so that the etcd server will periodically send a WatchResponse with
-	// no events to the new watcher if there are no recent events. It is useful when clients
-	// wish to recover a disconnected watcher starting from a recent known revision.
-	// The etcd server may decide how often it will send notifications based on current load.
-	ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
-	// filters filter the events at server side before it sends back to the watcher.
-	Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
-	// If prev_kv is set, created watcher gets the previous KV before the event happens.
-	// If the previous KV is already compacted, nothing will be returned.
-	PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	// If watch_id is provided and non-zero, it will be assigned to this watcher.
-	// Since creating a watcher in etcd is not a synchronous operation,
-	// this can be used to ensure that ordering is correct when creating multiple
-	// watchers on the same stream. Creating a watcher with an ID already in
-	// use on the stream will cause an error to be returned.
-	WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
-	// fragment enables splitting large revisions into multiple watch responses.
-	Fragment             bool     `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *WatchCreateRequest) Reset()         { *m = WatchCreateRequest{} }
-func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchCreateRequest) ProtoMessage()    {}
-func (*WatchCreateRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{21}
-}
-func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchCreateRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchCreateRequest.Merge(m, src)
-}
-func (m *WatchCreateRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchCreateRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo
-
-func (m *WatchCreateRequest) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *WatchCreateRequest) GetRangeEnd() []byte {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return nil
-}
-
-func (m *WatchCreateRequest) GetStartRevision() int64 {
-	if m != nil {
-		return m.StartRevision
-	}
-	return 0
-}
-
-func (m *WatchCreateRequest) GetProgressNotify() bool {
-	if m != nil {
-		return m.ProgressNotify
-	}
-	return false
-}
-
-func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
-	if m != nil {
-		return m.Filters
-	}
-	return nil
-}
-
-func (m *WatchCreateRequest) GetPrevKv() bool {
-	if m != nil {
-		return m.PrevKv
-	}
-	return false
-}
-
-func (m *WatchCreateRequest) GetWatchId() int64 {
-	if m != nil {
-		return m.WatchId
-	}
-	return 0
-}
-
-func (m *WatchCreateRequest) GetFragment() bool {
-	if m != nil {
-		return m.Fragment
-	}
-	return false
-}
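WatchCreateRequest is wrapped in the WatchRequest oneof shown earlier. For a prefix watch, range_end is the key with its last byte bumped by one (the field comment's "one bit larger" phrasing). A hedged sketch; prefixEnd is an illustrative helper, not part of this package, and pb is as above:

// prefixEnd computes the range_end that makes [prefix, range_end) cover every key with prefix.
func prefixEnd(prefix []byte) []byte {
	end := append([]byte{}, prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0} // prefix was all 0xff: watch the whole keyspace
}

func newPrefixWatch(prefix []byte) *pb.WatchRequest {
	return &pb.WatchRequest{
		RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key:            prefix,
				RangeEnd:       prefixEnd(prefix),
				StartRevision:  0,    // 0 means "now"
				ProgressNotify: true, // allow periodic empty responses
				PrevKv:         true, // include the previous KV with each event
			},
		},
	}
}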
-
-type WatchCancelRequest struct {
-	// watch_id is the watcher id to cancel so that no more events are transmitted.
-	WatchId              int64    `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *WatchCancelRequest) Reset()         { *m = WatchCancelRequest{} }
-func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchCancelRequest) ProtoMessage()    {}
-func (*WatchCancelRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{22}
-}
-func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchCancelRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchCancelRequest.Merge(m, src)
-}
-func (m *WatchCancelRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchCancelRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo
-
-func (m *WatchCancelRequest) GetWatchId() int64 {
-	if m != nil {
-		return m.WatchId
-	}
-	return 0
-}
-
-// Requests that a watch stream progress status be sent in the watch response stream as soon as
-// possible.
-type WatchProgressRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *WatchProgressRequest) Reset()         { *m = WatchProgressRequest{} }
-func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchProgressRequest) ProtoMessage()    {}
-func (*WatchProgressRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{23}
-}
-func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchProgressRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchProgressRequest.Merge(m, src)
-}
-func (m *WatchProgressRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchProgressRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo
-
-type WatchResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// watch_id is the ID of the watcher that corresponds to the response.
-	WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
-	// created is set to true if the response is for a create watch request.
-	// The client should record the watch_id and expect to receive events for
-	// the created watcher from the same stream.
-	// All events sent to the created watcher will attach with the same watch_id.
-	Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
-	// canceled is set to true if the response is for a cancel watch request.
-	// No further events will be sent to the canceled watcher.
-	Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
-	// compact_revision is set to the minimum index if a watcher tries to watch
-	// at a compacted index.
-	//
-	// This happens when creating a watcher at a compacted revision or the watcher cannot
-	// catch up with the progress of the key-value store.
-	//
-	// The client should treat the watcher as canceled and should not try to create any
-	// watcher with the same start_revision again.
-	CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
-	// cancel_reason indicates the reason for canceling the watcher.
-	CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
-	// fragment is true if a large watch response was split over multiple responses.
-	Fragment             bool            `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
-	Events               []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *WatchResponse) Reset()         { *m = WatchResponse{} }
-func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
-func (*WatchResponse) ProtoMessage()    {}
-func (*WatchResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{24}
-}
-func (m *WatchResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchResponse.Merge(m, src)
-}
-func (m *WatchResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchResponse proto.InternalMessageInfo
-
-func (m *WatchResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *WatchResponse) GetWatchId() int64 {
-	if m != nil {
-		return m.WatchId
-	}
-	return 0
-}
-
-func (m *WatchResponse) GetCreated() bool {
-	if m != nil {
-		return m.Created
-	}
-	return false
-}
-
-func (m *WatchResponse) GetCanceled() bool {
-	if m != nil {
-		return m.Canceled
-	}
-	return false
-}
-
-func (m *WatchResponse) GetCompactRevision() int64 {
-	if m != nil {
-		return m.CompactRevision
-	}
-	return 0
-}
-
-func (m *WatchResponse) GetCancelReason() string {
-	if m != nil {
-		return m.CancelReason
-	}
-	return ""
-}
-
-func (m *WatchResponse) GetFragment() bool {
-	if m != nil {
-		return m.Fragment
-	}
-	return false
-}
-
-func (m *WatchResponse) GetEvents() []*mvccpb.Event {
-	if m != nil {
-		return m.Events
-	}
-	return nil
-}
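On the receiving side, a WatchResponse takes one of three shapes: a creation ack, a cancellation (possibly due to compaction), or a batch of events. A hedged handling sketch, with pb as above:

func handleWatch(resp *pb.WatchResponse) {
	switch {
	case resp.GetCreated():
		// Record the watch_id; all events for this watcher arrive on the same stream with that id.
		_ = resp.GetWatchId()
	case resp.GetCanceled():
		// No further events. A non-zero CompactRevision means the requested start
		// revision was already compacted; do not retry with the same start_revision.
		_, _ = resp.GetCompactRevision(), resp.GetCancelReason()
	default:
		for _, ev := range resp.GetEvents() {
			_ = ev // *mvccpb.Event: event type, current kv and, if requested, prev_kv
		}
	}
}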
-
-type LeaseGrantRequest struct {
-	// TTL is the advisory time-to-live in seconds. Expired lease will return -1.
-	TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
-	// ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
-	ID                   int64    `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseGrantRequest) Reset()         { *m = LeaseGrantRequest{} }
-func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseGrantRequest) ProtoMessage()    {}
-func (*LeaseGrantRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{25}
-}
-func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseGrantRequest.Merge(m, src)
-}
-func (m *LeaseGrantRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseGrantRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo
-
-func (m *LeaseGrantRequest) GetTTL() int64 {
-	if m != nil {
-		return m.TTL
-	}
-	return 0
-}
-
-func (m *LeaseGrantRequest) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type LeaseGrantResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// ID is the lease ID for the granted lease.
-	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
-	// TTL is the server chosen lease time-to-live in seconds.
-	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
-	Error                string   `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseGrantResponse) Reset()         { *m = LeaseGrantResponse{} }
-func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseGrantResponse) ProtoMessage()    {}
-func (*LeaseGrantResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{26}
-}
-func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseGrantResponse.Merge(m, src)
-}
-func (m *LeaseGrantResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseGrantResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo
-
-func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *LeaseGrantResponse) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *LeaseGrantResponse) GetTTL() int64 {
-	if m != nil {
-		return m.TTL
-	}
-	return 0
-}
-
-func (m *LeaseGrantResponse) GetError() string {
-	if m != nil {
-		return m.Error
-	}
-	return ""
-}
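The lease messages form a simple lifecycle: grant with an advisory TTL (ID 0 lets the lessor choose), keep the lease alive using the ID returned in the grant response, and revoke it when done, which also deletes every key attached to it. Illustrative only, with pb as above:

func leaseMessages(grantedID int64) (*pb.LeaseGrantRequest, *pb.LeaseKeepAliveRequest, *pb.LeaseRevokeRequest) {
	grant := &pb.LeaseGrantRequest{TTL: 30, ID: 0} // ID 0: let the lessor pick
	keep := &pb.LeaseKeepAliveRequest{ID: grantedID}
	revoke := &pb.LeaseRevokeRequest{ID: grantedID} // revoking deletes all attached keys
	return grant, keep, revoke
}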
-
-type LeaseRevokeRequest struct {
-	// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
-	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseRevokeRequest) Reset()         { *m = LeaseRevokeRequest{} }
-func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseRevokeRequest) ProtoMessage()    {}
-func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{27}
-}
-func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseRevokeRequest.Merge(m, src)
-}
-func (m *LeaseRevokeRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseRevokeRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo
-
-func (m *LeaseRevokeRequest) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type LeaseRevokeResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *LeaseRevokeResponse) Reset()         { *m = LeaseRevokeResponse{} }
-func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseRevokeResponse) ProtoMessage()    {}
-func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{28}
-}
-func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseRevokeResponse.Merge(m, src)
-}
-func (m *LeaseRevokeResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseRevokeResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo
-
-func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type LeaseKeepAliveRequest struct {
-	// ID is the lease ID for the lease to keep alive.
-	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseKeepAliveRequest) Reset()         { *m = LeaseKeepAliveRequest{} }
-func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseKeepAliveRequest) ProtoMessage()    {}
-func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{29}
-}
-func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src)
-}
-func (m *LeaseKeepAliveRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo
-
-func (m *LeaseKeepAliveRequest) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type LeaseKeepAliveResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// ID is the lease ID from the keep alive request.
-	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
-	// TTL is the new time-to-live for the lease.
-	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseKeepAliveResponse) Reset()         { *m = LeaseKeepAliveResponse{} }
-func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseKeepAliveResponse) ProtoMessage()    {}
-func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{30}
-}
-func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src)
-}
-func (m *LeaseKeepAliveResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo
-
-func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *LeaseKeepAliveResponse) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *LeaseKeepAliveResponse) GetTTL() int64 {
-	if m != nil {
-		return m.TTL
-	}
-	return 0
-}
-
-type LeaseTimeToLiveRequest struct {
-	// ID is the lease ID for the lease.
-	ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// keys is true to query all the keys attached to this lease.
-	Keys                 bool     `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseTimeToLiveRequest) Reset()         { *m = LeaseTimeToLiveRequest{} }
-func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseTimeToLiveRequest) ProtoMessage()    {}
-func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{31}
-}
-func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src)
-}
-func (m *LeaseTimeToLiveRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo
-
-func (m *LeaseTimeToLiveRequest) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *LeaseTimeToLiveRequest) GetKeys() bool {
-	if m != nil {
-		return m.Keys
-	}
-	return false
-}
-
-type LeaseTimeToLiveResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// ID is the lease ID from the keep alive request.
-	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
-	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
-	TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
-	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
-	GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
-	// Keys is the list of keys attached to this lease.
-	Keys                 [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseTimeToLiveResponse) Reset()         { *m = LeaseTimeToLiveResponse{} }
-func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseTimeToLiveResponse) ProtoMessage()    {}
-func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{32}
-}
-func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src)
-}
-func (m *LeaseTimeToLiveResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo
-
-func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *LeaseTimeToLiveResponse) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
-	if m != nil {
-		return m.TTL
-	}
-	return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
-	if m != nil {
-		return m.GrantedTTL
-	}
-	return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
-	if m != nil {
-		return m.Keys
-	}
-	return nil
-}
-
-type LeaseLeasesRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseLeasesRequest) Reset()         { *m = LeaseLeasesRequest{} }
-func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseLeasesRequest) ProtoMessage()    {}
-func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{33}
-}
-func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseLeasesRequest.Merge(m, src)
-}
-func (m *LeaseLeasesRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseLeasesRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo
-
-type LeaseStatus struct {
-	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseStatus) Reset()         { *m = LeaseStatus{} }
-func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
-func (*LeaseStatus) ProtoMessage()    {}
-func (*LeaseStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{34}
-}
-func (m *LeaseStatus) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseStatus) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseStatus.Merge(m, src)
-}
-func (m *LeaseStatus) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseStatus) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo
-
-func (m *LeaseStatus) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type LeaseLeasesResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Leases               []*LeaseStatus  `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *LeaseLeasesResponse) Reset()         { *m = LeaseLeasesResponse{} }
-func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseLeasesResponse) ProtoMessage()    {}
-func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{35}
-}
-func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseLeasesResponse.Merge(m, src)
-}
-func (m *LeaseLeasesResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseLeasesResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo
-
-func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
-	if m != nil {
-		return m.Leases
-	}
-	return nil
-}
-
-type Member struct {
-	// ID is the member ID for this member.
-	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// name is the human-readable name of the member. If the member is not started, the name will be an empty string.
-	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
-	// peerURLs is the list of URLs the member exposes to the cluster for communication.
-	PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
-	// clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
-	ClientURLs           []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Member) Reset()         { *m = Member{} }
-func (m *Member) String() string { return proto.CompactTextString(m) }
-func (*Member) ProtoMessage()    {}
-func (*Member) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{36}
-}
-func (m *Member) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Member.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Member) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Member.Merge(m, src)
-}
-func (m *Member) XXX_Size() int {
-	return m.Size()
-}
-func (m *Member) XXX_DiscardUnknown() {
-	xxx_messageInfo_Member.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Member proto.InternalMessageInfo
-
-func (m *Member) GetID() uint64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *Member) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *Member) GetPeerURLs() []string {
-	if m != nil {
-		return m.PeerURLs
-	}
-	return nil
-}
-
-func (m *Member) GetClientURLs() []string {
-	if m != nil {
-		return m.ClientURLs
-	}
-	return nil
-}
-
-type MemberAddRequest struct {
-	// peerURLs is the list of URLs the added member will use to communicate with the cluster.
-	PeerURLs             []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MemberAddRequest) Reset()         { *m = MemberAddRequest{} }
-func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberAddRequest) ProtoMessage()    {}
-func (*MemberAddRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{37}
-}
-func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberAddRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberAddRequest.Merge(m, src)
-}
-func (m *MemberAddRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberAddRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo
-
-func (m *MemberAddRequest) GetPeerURLs() []string {
-	if m != nil {
-		return m.PeerURLs
-	}
-	return nil
-}
-
-type MemberAddResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// member is the member information for the added member.
-	Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"`
-	// members is a list of all members after adding the new member.
-	Members              []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *MemberAddResponse) Reset()         { *m = MemberAddResponse{} }
-func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberAddResponse) ProtoMessage()    {}
-func (*MemberAddResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{38}
-}
-func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberAddResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberAddResponse.Merge(m, src)
-}
-func (m *MemberAddResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberAddResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo
-
-func (m *MemberAddResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *MemberAddResponse) GetMember() *Member {
-	if m != nil {
-		return m.Member
-	}
-	return nil
-}
-
-func (m *MemberAddResponse) GetMembers() []*Member {
-	if m != nil {
-		return m.Members
-	}
-	return nil
-}
-
-type MemberRemoveRequest struct {
-	// ID is the member ID of the member to remove.
-	ID                   uint64   `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MemberRemoveRequest) Reset()         { *m = MemberRemoveRequest{} }
-func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberRemoveRequest) ProtoMessage()    {}
-func (*MemberRemoveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{39}
-}
-func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberRemoveRequest.Merge(m, src)
-}
-func (m *MemberRemoveRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberRemoveRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo
-
-func (m *MemberRemoveRequest) GetID() uint64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type MemberRemoveResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// members is a list of all members after removing the member.
-	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *MemberRemoveResponse) Reset()         { *m = MemberRemoveResponse{} }
-func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberRemoveResponse) ProtoMessage()    {}
-func (*MemberRemoveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{40}
-}
-func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberRemoveResponse.Merge(m, src)
-}
-func (m *MemberRemoveResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberRemoveResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo
-
-func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *MemberRemoveResponse) GetMembers() []*Member {
-	if m != nil {
-		return m.Members
-	}
-	return nil
-}
-
-type MemberUpdateRequest struct {
-	// ID is the member ID of the member to update.
-	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// peerURLs is the new list of URLs the member will use to communicate with the cluster.
-	PeerURLs             []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MemberUpdateRequest) Reset()         { *m = MemberUpdateRequest{} }
-func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberUpdateRequest) ProtoMessage()    {}
-func (*MemberUpdateRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{41}
-}
-func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberUpdateRequest.Merge(m, src)
-}
-func (m *MemberUpdateRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberUpdateRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo
-
-func (m *MemberUpdateRequest) GetID() uint64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *MemberUpdateRequest) GetPeerURLs() []string {
-	if m != nil {
-		return m.PeerURLs
-	}
-	return nil
-}
-
-type MemberUpdateResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// members is a list of all members after updating the member.
-	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *MemberUpdateResponse) Reset()         { *m = MemberUpdateResponse{} }
-func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberUpdateResponse) ProtoMessage()    {}
-func (*MemberUpdateResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{42}
-}
-func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberUpdateResponse.Merge(m, src)
-}
-func (m *MemberUpdateResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberUpdateResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo
-
-func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *MemberUpdateResponse) GetMembers() []*Member {
-	if m != nil {
-		return m.Members
-	}
-	return nil
-}
-
-type MemberListRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MemberListRequest) Reset()         { *m = MemberListRequest{} }
-func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberListRequest) ProtoMessage()    {}
-func (*MemberListRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{43}
-}
-func (m *MemberListRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberListRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberListRequest.Merge(m, src)
-}
-func (m *MemberListRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberListRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo
-
-type MemberListResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// members is a list of all members associated with the cluster.
-	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *MemberListResponse) Reset()         { *m = MemberListResponse{} }
-func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberListResponse) ProtoMessage()    {}
-func (*MemberListResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{44}
-}
-func (m *MemberListResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberListResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberListResponse.Merge(m, src)
-}
-func (m *MemberListResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberListResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo
-
-func (m *MemberListResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *MemberListResponse) GetMembers() []*Member {
-	if m != nil {
-		return m.Members
-	}
-	return nil
-}
-
-type DefragmentRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DefragmentRequest) Reset()         { *m = DefragmentRequest{} }
-func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
-func (*DefragmentRequest) ProtoMessage()    {}
-func (*DefragmentRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{45}
-}
-func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *DefragmentRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DefragmentRequest.Merge(m, src)
-}
-func (m *DefragmentRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *DefragmentRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_DefragmentRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo
-
-type DefragmentResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *DefragmentResponse) Reset()         { *m = DefragmentResponse{} }
-func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
-func (*DefragmentResponse) ProtoMessage()    {}
-func (*DefragmentResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{46}
-}
-func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *DefragmentResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DefragmentResponse.Merge(m, src)
-}
-func (m *DefragmentResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *DefragmentResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_DefragmentResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo
-
-func (m *DefragmentResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type MoveLeaderRequest struct {
-	// targetID is the node ID for the new leader.
-	TargetID             uint64   `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MoveLeaderRequest) Reset()         { *m = MoveLeaderRequest{} }
-func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
-func (*MoveLeaderRequest) ProtoMessage()    {}
-func (*MoveLeaderRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{47}
-}
-func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MoveLeaderRequest.Merge(m, src)
-}
-func (m *MoveLeaderRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MoveLeaderRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo
-
-func (m *MoveLeaderRequest) GetTargetID() uint64 {
-	if m != nil {
-		return m.TargetID
-	}
-	return 0
-}
-
-type MoveLeaderResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *MoveLeaderResponse) Reset()         { *m = MoveLeaderResponse{} }
-func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
-func (*MoveLeaderResponse) ProtoMessage()    {}
-func (*MoveLeaderResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{48}
-}
-func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MoveLeaderResponse.Merge(m, src)
-}
-func (m *MoveLeaderResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MoveLeaderResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo
-
-func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AlarmRequest struct {
-	// action is the kind of alarm request to issue. The action
-	// may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
-	// raised alarm.
-	Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
-	// memberID is the ID of the member associated with the alarm. If memberID is 0, the
-	// alarm request covers all members.
-	MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
-	// alarm is the type of alarm to consider for this request.
-	Alarm                AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *AlarmRequest) Reset()         { *m = AlarmRequest{} }
-func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
-func (*AlarmRequest) ProtoMessage()    {}
-func (*AlarmRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{49}
-}
-func (m *AlarmRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AlarmRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AlarmRequest.Merge(m, src)
-}
-func (m *AlarmRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AlarmRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AlarmRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo
-
-func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
-	if m != nil {
-		return m.Action
-	}
-	return AlarmRequest_GET
-}
-
-func (m *AlarmRequest) GetMemberID() uint64 {
-	if m != nil {
-		return m.MemberID
-	}
-	return 0
-}
-
-func (m *AlarmRequest) GetAlarm() AlarmType {
-	if m != nil {
-		return m.Alarm
-	}
-	return AlarmType_NONE
-}
-
-type AlarmMember struct {
-	// memberID is the ID of the member associated with the raised alarm.
-	MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
-	// alarm is the type of alarm which has been raised.
-	Alarm                AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *AlarmMember) Reset()         { *m = AlarmMember{} }
-func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
-func (*AlarmMember) ProtoMessage()    {}
-func (*AlarmMember) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{50}
-}
-func (m *AlarmMember) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AlarmMember) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AlarmMember.Merge(m, src)
-}
-func (m *AlarmMember) XXX_Size() int {
-	return m.Size()
-}
-func (m *AlarmMember) XXX_DiscardUnknown() {
-	xxx_messageInfo_AlarmMember.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmMember proto.InternalMessageInfo
-
-func (m *AlarmMember) GetMemberID() uint64 {
-	if m != nil {
-		return m.MemberID
-	}
-	return 0
-}
-
-func (m *AlarmMember) GetAlarm() AlarmType {
-	if m != nil {
-		return m.Alarm
-	}
-	return AlarmType_NONE
-}
-
-type AlarmResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// alarms is a list of alarms associated with the alarm request.
-	Alarms               []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
-	XXX_unrecognized     []byte         `json:"-"`
-	XXX_sizecache        int32          `json:"-"`
-}
-
-func (m *AlarmResponse) Reset()         { *m = AlarmResponse{} }
-func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
-func (*AlarmResponse) ProtoMessage()    {}
-func (*AlarmResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{51}
-}
-func (m *AlarmResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AlarmResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AlarmResponse.Merge(m, src)
-}
-func (m *AlarmResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AlarmResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AlarmResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo
-
-func (m *AlarmResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AlarmResponse) GetAlarms() []*AlarmMember {
-	if m != nil {
-		return m.Alarms
-	}
-	return nil
-}
-
-type StatusRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *StatusRequest) Reset()         { *m = StatusRequest{} }
-func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
-func (*StatusRequest) ProtoMessage()    {}
-func (*StatusRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{52}
-}
-func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *StatusRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StatusRequest.Merge(m, src)
-}
-func (m *StatusRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *StatusRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_StatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
-
-type StatusResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// version is the cluster protocol version used by the responding member.
-	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
-	// dbSize is the size of the backend database, in bytes, of the responding member.
-	DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
-	// leader is the member ID which the responding member believes is the current leader.
-	Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
-	// raftIndex is the current raft index of the responding member.
-	RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
-	// raftTerm is the current raft term of the responding member.
-	RaftTerm             uint64   `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
-func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
-func (*StatusResponse) ProtoMessage()    {}
-func (*StatusResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{53}
-}
-func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *StatusResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StatusResponse.Merge(m, src)
-}
-func (m *StatusResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *StatusResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_StatusResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
-
-func (m *StatusResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *StatusResponse) GetVersion() string {
-	if m != nil {
-		return m.Version
-	}
-	return ""
-}
-
-func (m *StatusResponse) GetDbSize() int64 {
-	if m != nil {
-		return m.DbSize
-	}
-	return 0
-}
-
-func (m *StatusResponse) GetLeader() uint64 {
-	if m != nil {
-		return m.Leader
-	}
-	return 0
-}
-
-func (m *StatusResponse) GetRaftIndex() uint64 {
-	if m != nil {
-		return m.RaftIndex
-	}
-	return 0
-}
-
-func (m *StatusResponse) GetRaftTerm() uint64 {
-	if m != nil {
-		return m.RaftTerm
-	}
-	return 0
-}
-
-type AuthEnableRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthEnableRequest) Reset()         { *m = AuthEnableRequest{} }
-func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthEnableRequest) ProtoMessage()    {}
-func (*AuthEnableRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{54}
-}
-func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthEnableRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthEnableRequest.Merge(m, src)
-}
-func (m *AuthEnableRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthEnableRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo
-
-type AuthDisableRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthDisableRequest) Reset()         { *m = AuthDisableRequest{} }
-func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthDisableRequest) ProtoMessage()    {}
-func (*AuthDisableRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{55}
-}
-func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthDisableRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthDisableRequest.Merge(m, src)
-}
-func (m *AuthDisableRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthDisableRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo
-
-type AuthenticateRequest struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthenticateRequest) Reset()         { *m = AuthenticateRequest{} }
-func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthenticateRequest) ProtoMessage()    {}
-func (*AuthenticateRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{56}
-}
-func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthenticateRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthenticateRequest.Merge(m, src)
-}
-func (m *AuthenticateRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthenticateRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo
-
-func (m *AuthenticateRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthenticateRequest) GetPassword() string {
-	if m != nil {
-		return m.Password
-	}
-	return ""
-}
-
-type AuthUserAddRequest struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserAddRequest) Reset()         { *m = AuthUserAddRequest{} }
-func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserAddRequest) ProtoMessage()    {}
-func (*AuthUserAddRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{57}
-}
-func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserAddRequest.Merge(m, src)
-}
-func (m *AuthUserAddRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserAddRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo
-
-func (m *AuthUserAddRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthUserAddRequest) GetPassword() string {
-	if m != nil {
-		return m.Password
-	}
-	return ""
-}
-
-type AuthUserGetRequest struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserGetRequest) Reset()         { *m = AuthUserGetRequest{} }
-func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGetRequest) ProtoMessage()    {}
-func (*AuthUserGetRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{58}
-}
-func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserGetRequest.Merge(m, src)
-}
-func (m *AuthUserGetRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserGetRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo
-
-func (m *AuthUserGetRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-type AuthUserDeleteRequest struct {
-	// name is the name of the user to delete.
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserDeleteRequest) Reset()         { *m = AuthUserDeleteRequest{} }
-func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserDeleteRequest) ProtoMessage()    {}
-func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{59}
-}
-func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src)
-}
-func (m *AuthUserDeleteRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo
-
-func (m *AuthUserDeleteRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-type AuthUserChangePasswordRequest struct {
-	// name is the name of the user whose password is being changed.
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	// password is the new password for the user.
-	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserChangePasswordRequest) Reset()         { *m = AuthUserChangePasswordRequest{} }
-func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserChangePasswordRequest) ProtoMessage()    {}
-func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{60}
-}
-func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src)
-}
-func (m *AuthUserChangePasswordRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo
-
-func (m *AuthUserChangePasswordRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthUserChangePasswordRequest) GetPassword() string {
-	if m != nil {
-		return m.Password
-	}
-	return ""
-}
-
-type AuthUserGrantRoleRequest struct {
-	// user is the name of the user which should be granted a given role.
-	User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
-	// role is the name of the role to grant to the user.
-	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserGrantRoleRequest) Reset()         { *m = AuthUserGrantRoleRequest{} }
-func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGrantRoleRequest) ProtoMessage()    {}
-func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{61}
-}
-func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src)
-}
-func (m *AuthUserGrantRoleRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo
-
-func (m *AuthUserGrantRoleRequest) GetUser() string {
-	if m != nil {
-		return m.User
-	}
-	return ""
-}
-
-func (m *AuthUserGrantRoleRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-type AuthUserRevokeRoleRequest struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserRevokeRoleRequest) Reset()         { *m = AuthUserRevokeRoleRequest{} }
-func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserRevokeRoleRequest) ProtoMessage()    {}
-func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{62}
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src)
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo
-
-func (m *AuthUserRevokeRoleRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthUserRevokeRoleRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-type AuthRoleAddRequest struct {
-	// name is the name of the role to add to the authentication system.
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleAddRequest) Reset()         { *m = AuthRoleAddRequest{} }
-func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleAddRequest) ProtoMessage()    {}
-func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{63}
-}
-func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleAddRequest.Merge(m, src)
-}
-func (m *AuthRoleAddRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleAddRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo
-
-func (m *AuthRoleAddRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-type AuthRoleGetRequest struct {
-	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleGetRequest) Reset()         { *m = AuthRoleGetRequest{} }
-func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGetRequest) ProtoMessage()    {}
-func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{64}
-}
-func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleGetRequest.Merge(m, src)
-}
-func (m *AuthRoleGetRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleGetRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo
-
-func (m *AuthRoleGetRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-type AuthUserListRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserListRequest) Reset()         { *m = AuthUserListRequest{} }
-func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserListRequest) ProtoMessage()    {}
-func (*AuthUserListRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{65}
-}
-func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserListRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserListRequest.Merge(m, src)
-}
-func (m *AuthUserListRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserListRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo
-
-type AuthRoleListRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleListRequest) Reset()         { *m = AuthRoleListRequest{} }
-func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleListRequest) ProtoMessage()    {}
-func (*AuthRoleListRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{66}
-}
-func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleListRequest.Merge(m, src)
-}
-func (m *AuthRoleListRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleListRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo
-
-type AuthRoleDeleteRequest struct {
-	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleDeleteRequest) Reset()         { *m = AuthRoleDeleteRequest{} }
-func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleDeleteRequest) ProtoMessage()    {}
-func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{67}
-}
-func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src)
-}
-func (m *AuthRoleDeleteRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo
-
-func (m *AuthRoleDeleteRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-type AuthRoleGrantPermissionRequest struct {
-	// name is the name of the role which will be granted the permission.
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	// perm is the permission to grant to the role.
-	Perm                 *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
-	XXX_unrecognized     []byte             `json:"-"`
-	XXX_sizecache        int32              `json:"-"`
-}
-
-func (m *AuthRoleGrantPermissionRequest) Reset()         { *m = AuthRoleGrantPermissionRequest{} }
-func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGrantPermissionRequest) ProtoMessage()    {}
-func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{68}
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src)
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo
-
-func (m *AuthRoleGrantPermissionRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
-	if m != nil {
-		return m.Perm
-	}
-	return nil
-}
-
-type AuthRoleRevokePermissionRequest struct {
-	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
-	Key                  string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
-	RangeEnd             string   `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleRevokePermissionRequest) Reset()         { *m = AuthRoleRevokePermissionRequest{} }
-func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleRevokePermissionRequest) ProtoMessage()    {}
-func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{69}
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src)
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo
-
-func (m *AuthRoleRevokePermissionRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-func (m *AuthRoleRevokePermissionRequest) GetKey() string {
-	if m != nil {
-		return m.Key
-	}
-	return ""
-}
-
-func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return ""
-}
-
-type AuthEnableResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthEnableResponse) Reset()         { *m = AuthEnableResponse{} }
-func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthEnableResponse) ProtoMessage()    {}
-func (*AuthEnableResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{70}
-}
-func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthEnableResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthEnableResponse.Merge(m, src)
-}
-func (m *AuthEnableResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthEnableResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo
-
-func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthDisableResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthDisableResponse) Reset()         { *m = AuthDisableResponse{} }
-func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthDisableResponse) ProtoMessage()    {}
-func (*AuthDisableResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{71}
-}
-func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthDisableResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthDisableResponse.Merge(m, src)
-}
-func (m *AuthDisableResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthDisableResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo
-
-func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthenticateResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// token is an authorized token that can be used in succeeding RPCs
-	Token                string   `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthenticateResponse) Reset()         { *m = AuthenticateResponse{} }
-func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthenticateResponse) ProtoMessage()    {}
-func (*AuthenticateResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{72}
-}
-func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthenticateResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthenticateResponse.Merge(m, src)
-}
-func (m *AuthenticateResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthenticateResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo
-
-func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthenticateResponse) GetToken() string {
-	if m != nil {
-		return m.Token
-	}
-	return ""
-}
-
-type AuthUserAddResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserAddResponse) Reset()         { *m = AuthUserAddResponse{} }
-func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserAddResponse) ProtoMessage()    {}
-func (*AuthUserAddResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{73}
-}
-func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserAddResponse.Merge(m, src)
-}
-func (m *AuthUserAddResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserAddResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo
-
-func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthUserGetResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserGetResponse) Reset()         { *m = AuthUserGetResponse{} }
-func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGetResponse) ProtoMessage()    {}
-func (*AuthUserGetResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{74}
-}
-func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserGetResponse.Merge(m, src)
-}
-func (m *AuthUserGetResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserGetResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo
-
-func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthUserGetResponse) GetRoles() []string {
-	if m != nil {
-		return m.Roles
-	}
-	return nil
-}
-
-type AuthUserDeleteResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserDeleteResponse) Reset()         { *m = AuthUserDeleteResponse{} }
-func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserDeleteResponse) ProtoMessage()    {}
-func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{75}
-}
-func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src)
-}
-func (m *AuthUserDeleteResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo
-
-func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthUserChangePasswordResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserChangePasswordResponse) Reset()         { *m = AuthUserChangePasswordResponse{} }
-func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserChangePasswordResponse) ProtoMessage()    {}
-func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{76}
-}
-func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src)
-}
-func (m *AuthUserChangePasswordResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo
-
-func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthUserGrantRoleResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserGrantRoleResponse) Reset()         { *m = AuthUserGrantRoleResponse{} }
-func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGrantRoleResponse) ProtoMessage()    {}
-func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{77}
-}
-func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src)
-}
-func (m *AuthUserGrantRoleResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo
-
-func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthUserRevokeRoleResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserRevokeRoleResponse) Reset()         { *m = AuthUserRevokeRoleResponse{} }
-func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserRevokeRoleResponse) ProtoMessage()    {}
-func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{78}
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src)
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo
-
-func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthRoleAddResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleAddResponse) Reset()         { *m = AuthRoleAddResponse{} }
-func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleAddResponse) ProtoMessage()    {}
-func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{79}
-}
-func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleAddResponse.Merge(m, src)
-}
-func (m *AuthRoleAddResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleAddResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo
-
-func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthRoleGetResponse struct {
-	Header               *ResponseHeader      `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Perm                 []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
-	XXX_unrecognized     []byte               `json:"-"`
-	XXX_sizecache        int32                `json:"-"`
-}
-
-func (m *AuthRoleGetResponse) Reset()         { *m = AuthRoleGetResponse{} }
-func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGetResponse) ProtoMessage()    {}
-func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{80}
-}
-func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleGetResponse.Merge(m, src)
-}
-func (m *AuthRoleGetResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleGetResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo
-
-func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
-	if m != nil {
-		return m.Perm
-	}
-	return nil
-}
-
-type AuthRoleListResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleListResponse) Reset()         { *m = AuthRoleListResponse{} }
-func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleListResponse) ProtoMessage()    {}
-func (*AuthRoleListResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{81}
-}
-func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleListResponse.Merge(m, src)
-}
-func (m *AuthRoleListResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleListResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo
-
-func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthRoleListResponse) GetRoles() []string {
-	if m != nil {
-		return m.Roles
-	}
-	return nil
-}
-
-type AuthUserListResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Users                []string        `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserListResponse) Reset()         { *m = AuthUserListResponse{} }
-func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserListResponse) ProtoMessage()    {}
-func (*AuthUserListResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{82}
-}
-func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserListResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserListResponse.Merge(m, src)
-}
-func (m *AuthUserListResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserListResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo
-
-func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthUserListResponse) GetUsers() []string {
-	if m != nil {
-		return m.Users
-	}
-	return nil
-}
-
-type AuthRoleDeleteResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleDeleteResponse) Reset()         { *m = AuthRoleDeleteResponse{} }
-func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleDeleteResponse) ProtoMessage()    {}
-func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{83}
-}
-func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src)
-}
-func (m *AuthRoleDeleteResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo
-
-func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthRoleGrantPermissionResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleGrantPermissionResponse) Reset()         { *m = AuthRoleGrantPermissionResponse{} }
-func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGrantPermissionResponse) ProtoMessage()    {}
-func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{84}
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src)
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo
-
-func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthRoleRevokePermissionResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleRevokePermissionResponse) Reset()         { *m = AuthRoleRevokePermissionResponse{} }
-func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleRevokePermissionResponse) ProtoMessage()    {}
-func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{85}
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src)
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo
-
-func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
-	proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
-	proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
-	proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
-	proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
-	proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
-	proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
-	proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
-	proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
-	proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
-	proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
-	proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
-	proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
-	proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
-	proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
-	proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
-	proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
-	proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
-	proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
-	proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
-	proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
-	proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
-	proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
-	proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
-	proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
-	proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
-	proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
-	proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
-	proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
-	proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
-	proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
-	proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
-	proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
-	proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
-	proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
-	proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
-	proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
-	proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
-	proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
-	proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
-	proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
-	proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
-	proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
-	proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
-	proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
-	proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
-	proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
-	proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
-	proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
-	proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
-	proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
-	proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
-	proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
-	proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
-	proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
-	proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
-	proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
-	proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
-	proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
-	proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
-	proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
-	proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
-	proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
-	proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
-	proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
-	proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
-	proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
-	proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
-	proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
-	proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
-	proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
-	proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
-	proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
-	proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
-	proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
-	proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
-	proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
-	proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
-	proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
-	proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
-	proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
-	proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
-	proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
-	proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
-	proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
-	proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
-	proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
-	proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
-	proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
-	proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
-	proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
-	proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
-	proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
-}
-
-func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
-
-var fileDescriptor_77a6da22d6a3feb1 = []byte{
-	// 3711 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0x47,
-	0x76, 0xe6, 0x00, 0x04, 0x40, 0x1c, 0x5c, 0x08, 0x35, 0x29, 0x09, 0x84, 0x24, 0x8a, 0x6a, 0xdd,
-	0xa8, 0x8b, 0x09, 0x9b, 0x76, 0xf2, 0xa0, 0xa4, 0x5c, 0xa6, 0x48, 0x58, 0xa4, 0x49, 0x91, 0xf4,
-	0x10, 0x94, 0x9d, 0x2a, 0x27, 0xac, 0x21, 0xd0, 0x22, 0x11, 0x02, 0x33, 0xc8, 0xcc, 0x00, 0x22,
-	0x15, 0x57, 0x52, 0xe5, 0x38, 0xae, 0x3c, 0xc7, 0x55, 0xa9, 0x24, 0xaf, 0x5b, 0x5b, 0x2e, 0xff,
-	0x82, 0xfd, 0x0b, 0x5b, 0xfb, 0xb2, 0xbb, 0xb5, 0x7f, 0x60, 0xcb, 0xbb, 0x2f, 0xfb, 0x0b, 0xf6,
-	0xf2, 0xb4, 0xd5, 0xb7, 0x99, 0x9e, 0x1b, 0x48, 0x1b, 0xb6, 0x5f, 0xa8, 0xe9, 0xd3, 0xa7, 0xcf,
-	0x39, 0x7d, 0xba, 0xcf, 0x39, 0xdd, 0x5f, 0x43, 0x90, 0xb7, 0xfb, 0xad, 0xa5, 0xbe, 0x6d, 0xb9,
-	0x16, 0x2a, 0x12, 0xb7, 0xd5, 0x76, 0x88, 0x3d, 0x24, 0x76, 0xff, 0xb0, 0x36, 0x7b, 0x64, 0x1d,
-	0x59, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x8e, 0xf2, 0xd4, 0x7b, 0xc3, 0x56, 0x8b, 0xfd,
-	0xe9, 0x1f, 0xd6, 0x4f, 0x86, 0xa2, 0xeb, 0x1a, 0xeb, 0x32, 0x06, 0xee, 0x31, 0xfb, 0xd3, 0x3f,
-	0x64, 0xff, 0x88, 0xce, 0xeb, 0x47, 0x96, 0x75, 0xd4, 0x25, 0x75, 0xa3, 0xdf, 0xa9, 0x1b, 0xa6,
-	0x69, 0xb9, 0x86, 0xdb, 0xb1, 0x4c, 0x87, 0xf7, 0xe2, 0xff, 0xd4, 0xa0, 0xac, 0x13, 0xa7, 0x6f,
-	0x99, 0x0e, 0x59, 0x27, 0x46, 0x9b, 0xd8, 0xe8, 0x06, 0x40, 0xab, 0x3b, 0x70, 0x5c, 0x62, 0x1f,
-	0x74, 0xda, 0x55, 0x6d, 0x41, 0x5b, 0x9c, 0xd4, 0xf3, 0x82, 0xb2, 0xd1, 0x46, 0xd7, 0x20, 0xdf,
-	0x23, 0xbd, 0x43, 0xde, 0x9b, 0x62, 0xbd, 0x53, 0x9c, 0xb0, 0xd1, 0x46, 0x35, 0x98, 0xb2, 0xc9,
-	0xb0, 0xe3, 0x74, 0x2c, 0xb3, 0x9a, 0x5e, 0xd0, 0x16, 0xd3, 0xba, 0xd7, 0xa6, 0x03, 0x6d, 0xe3,
-	0xa5, 0x7b, 0xe0, 0x12, 0xbb, 0x57, 0x9d, 0xe4, 0x03, 0x29, 0xa1, 0x49, 0xec, 0x1e, 0xfe, 0x3c,
-	0x03, 0x45, 0xdd, 0x30, 0x8f, 0x88, 0x4e, 0xfe, 0x65, 0x40, 0x1c, 0x17, 0x55, 0x20, 0x7d, 0x42,
-	0xce, 0x98, 0xfa, 0xa2, 0x4e, 0x3f, 0xf9, 0x78, 0xf3, 0x88, 0x1c, 0x10, 0x93, 0x2b, 0x2e, 0xd2,
-	0xf1, 0xe6, 0x11, 0x69, 0x98, 0x6d, 0x34, 0x0b, 0x99, 0x6e, 0xa7, 0xd7, 0x71, 0x85, 0x56, 0xde,
-	0x08, 0x98, 0x33, 0x19, 0x32, 0x67, 0x15, 0xc0, 0xb1, 0x6c, 0xf7, 0xc0, 0xb2, 0xdb, 0xc4, 0xae,
-	0x66, 0x16, 0xb4, 0xc5, 0xf2, 0xf2, 0x9d, 0x25, 0x75, 0x21, 0x96, 0x54, 0x83, 0x96, 0xf6, 0x2c,
-	0xdb, 0xdd, 0xa1, 0xbc, 0x7a, 0xde, 0x91, 0x9f, 0xe8, 0x7d, 0x28, 0x30, 0x21, 0xae, 0x61, 0x1f,
-	0x11, 0xb7, 0x9a, 0x65, 0x52, 0xee, 0x9e, 0x23, 0xa5, 0xc9, 0x98, 0x75, 0xa6, 0x9e, 0x7f, 0x23,
-	0x0c, 0x45, 0x87, 0xd8, 0x1d, 0xa3, 0xdb, 0x79, 0x6d, 0x1c, 0x76, 0x49, 0x35, 0xb7, 0xa0, 0x2d,
-	0x4e, 0xe9, 0x01, 0x1a, 0x9d, 0xff, 0x09, 0x39, 0x73, 0x0e, 0x2c, 0xb3, 0x7b, 0x56, 0x9d, 0x62,
-	0x0c, 0x53, 0x94, 0xb0, 0x63, 0x76, 0xcf, 0xd8, 0xa2, 0x59, 0x03, 0xd3, 0xe5, 0xbd, 0x79, 0xd6,
-	0x9b, 0x67, 0x14, 0xd6, 0xbd, 0x08, 0x95, 0x5e, 0xc7, 0x3c, 0xe8, 0x59, 0xed, 0x03, 0xcf, 0x21,
-	0xc0, 0x1c, 0x52, 0xee, 0x75, 0xcc, 0xe7, 0x56, 0x5b, 0x97, 0x6e, 0xa1, 0x9c, 0xc6, 0x69, 0x90,
-	0xb3, 0x20, 0x38, 0x8d, 0x53, 0x95, 0x73, 0x09, 0x66, 0xa8, 0xcc, 0x96, 0x4d, 0x0c, 0x97, 0xf8,
-	0xcc, 0x45, 0xc6, 0x7c, 0xa9, 0xd7, 0x31, 0x57, 0x59, 0x4f, 0x80, 0xdf, 0x38, 0x8d, 0xf0, 0x97,
-	0x04, 0xbf, 0x71, 0x1a, 0xe4, 0xc7, 0x4b, 0x90, 0xf7, 0x7c, 0x8e, 0xa6, 0x60, 0x72, 0x7b, 0x67,
-	0xbb, 0x51, 0x99, 0x40, 0x00, 0xd9, 0x95, 0xbd, 0xd5, 0xc6, 0xf6, 0x5a, 0x45, 0x43, 0x05, 0xc8,
-	0xad, 0x35, 0x78, 0x23, 0x85, 0x9f, 0x02, 0xf8, 0xde, 0x45, 0x39, 0x48, 0x6f, 0x36, 0xfe, 0xa1,
-	0x32, 0x41, 0x79, 0x5e, 0x34, 0xf4, 0xbd, 0x8d, 0x9d, 0xed, 0x8a, 0x46, 0x07, 0xaf, 0xea, 0x8d,
-	0x95, 0x66, 0xa3, 0x92, 0xa2, 0x1c, 0xcf, 0x77, 0xd6, 0x2a, 0x69, 0x94, 0x87, 0xcc, 0x8b, 0x95,
-	0xad, 0xfd, 0x46, 0x65, 0x12, 0x7f, 0xa9, 0x41, 0x49, 0xac, 0x17, 0x8f, 0x09, 0xf4, 0x0e, 0x64,
-	0x8f, 0x59, 0x5c, 0xb0, 0xad, 0x58, 0x58, 0xbe, 0x1e, 0x5a, 0xdc, 0x40, 0xec, 0xe8, 0x82, 0x17,
-	0x61, 0x48, 0x9f, 0x0c, 0x9d, 0x6a, 0x6a, 0x21, 0xbd, 0x58, 0x58, 0xae, 0x2c, 0xf1, 0x80, 0x5d,
-	0xda, 0x24, 0x67, 0x2f, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xec, 0x59, 0x36, 0x61,
-	0x3b, 0x76, 0x4a, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xd7, 0x1a,
-	0xc0, 0xee, 0xc0, 0x4d, 0x0e, 0x8d, 0x59, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62,
-	0x82, 0x18, 0x0e, 0xf1, 0x62, 0x82, 0x36, 0xd0, 0x55, 0xc8, 0xf5, 0x6d, 0x32, 0x3c, 0x38, 0x19,
-	0x32, 0x25, 0x53, 0x7a, 0x96, 0x36, 0x37, 0x87, 0xe8, 0x16, 0x14, 0x3b, 0x47, 0xa6, 0x65, 0x93,
-	0x03, 0x2e, 0x2b, 0xc3, 0x7a, 0x0b, 0x9c, 0xc6, 0xec, 0x56, 0x58, 0xb8, 0xe0, 0xac, 0xca, 0xb2,
-	0x45, 0x49, 0xd8, 0x84, 0x02, 0x33, 0x75, 0x2c, 0xf7, 0x3d, 0xf0, 0x6d, 0x4c, 0xb1, 0x61, 0x51,
-	0x17, 0x0a, 0xab, 0xf1, 0x27, 0x80, 0xd6, 0x48, 0x97, 0xb8, 0x64, 0x9c, 0xec, 0xa1, 0xf8, 0x24,
-	0xad, 0xfa, 0x04, 0xff, 0xb7, 0x06, 0x33, 0x01, 0xf1, 0x63, 0x4d, 0xab, 0x0a, 0xb9, 0x36, 0x13,
-	0xc6, 0x2d, 0x48, 0xeb, 0xb2, 0x89, 0x1e, 0xc1, 0x94, 0x30, 0xc0, 0xa9, 0xa6, 0x13, 0x36, 0x4d,
-	0x8e, 0xdb, 0xe4, 0xe0, 0xaf, 0x53, 0x90, 0x17, 0x13, 0xdd, 0xe9, 0xa3, 0x15, 0x28, 0xd9, 0xbc,
-	0x71, 0xc0, 0xe6, 0x23, 0x2c, 0xaa, 0x25, 0x27, 0xa1, 0xf5, 0x09, 0xbd, 0x28, 0x86, 0x30, 0x32,
-	0xfa, 0x3b, 0x28, 0x48, 0x11, 0xfd, 0x81, 0x2b, 0x5c, 0x5e, 0x0d, 0x0a, 0xf0, 0xf7, 0xdf, 0xfa,
-	0x84, 0x0e, 0x82, 0x7d, 0x77, 0xe0, 0xa2, 0x26, 0xcc, 0xca, 0xc1, 0x7c, 0x36, 0xc2, 0x8c, 0x34,
-	0x93, 0xb2, 0x10, 0x94, 0x12, 0x5d, 0xaa, 0xf5, 0x09, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0x6a, 0x92,
-	0x7b, 0xca, 0x93, 0x77, 0xc4, 0xa4, 0xe6, 0xa9, 0x19, 0x35, 0xa9, 0x79, 0x6a, 0x3e, 0xcd, 0x43,
-	0x4e, 0xb4, 0xf0, 0xcf, 0x52, 0x00, 0x72, 0x35, 0x76, 0xfa, 0x68, 0x0d, 0xca, 0xb6, 0x68, 0x05,
-	0xbc, 0x75, 0x2d, 0xd6, 0x5b, 0x62, 0x11, 0x27, 0xf4, 0x92, 0x1c, 0xc4, 0x8d, 0x7b, 0x17, 0x8a,
-	0x9e, 0x14, 0xdf, 0x61, 0x73, 0x31, 0x0e, 0xf3, 0x24, 0x14, 0xe4, 0x00, 0xea, 0xb2, 0x8f, 0xe0,
-	0xb2, 0x37, 0x3e, 0xc6, 0x67, 0xb7, 0x46, 0xf8, 0xcc, 0x13, 0x38, 0x23, 0x25, 0xa8, 0x5e, 0x53,
-	0x0d, 0xf3, 0xdd, 0x36, 0x17, 0xe3, 0xb6, 0xa8, 0x61, 0xd4, 0x71, 0x40, 0xeb, 0x25, 0x6f, 0xe2,
-	0x3f, 0xa4, 0x21, 0xb7, 0x6a, 0xf5, 0xfa, 0x86, 0x4d, 0x57, 0x23, 0x6b, 0x13, 0x67, 0xd0, 0x75,
-	0x99, 0xbb, 0xca, 0xcb, 0xb7, 0x83, 0x12, 0x05, 0x9b, 0xfc, 0x57, 0x67, 0xac, 0xba, 0x18, 0x42,
-	0x07, 0x8b, 0xf2, 0x98, 0xba, 0xc0, 0x60, 0x51, 0x1c, 0xc5, 0x10, 0x19, 0xc8, 0x69, 0x3f, 0x90,
-	0x6b, 0x90, 0x1b, 0x12, 0xdb, 0x2f, 0xe9, 0xeb, 0x13, 0xba, 0x24, 0xa0, 0x07, 0x30, 0x1d, 0x2e,
-	0x2f, 0x19, 0xc1, 0x53, 0x6e, 0x05, 0xab, 0xd1, 0x6d, 0x28, 0x06, 0x6a, 0x5c, 0x56, 0xf0, 0x15,
-	0x7a, 0x4a, 0x89, 0xbb, 0x22, 0xf3, 0x2a, 0xad, 0xc7, 0xc5, 0xf5, 0x09, 0x99, 0x59, 0xaf, 0xc8,
-	0xcc, 0x3a, 0x25, 0x46, 0x89, 0xdc, 0x1a, 0x48, 0x32, 0xef, 0x05, 0x93, 0x0c, 0x7e, 0x0f, 0x4a,
-	0x01, 0x07, 0xd1, 0xba, 0xd3, 0xf8, 0x70, 0x7f, 0x65, 0x8b, 0x17, 0xa9, 0x67, 0xac, 0x2e, 0xe9,
-	0x15, 0x8d, 0xd6, 0xba, 0xad, 0xc6, 0xde, 0x5e, 0x25, 0x85, 0x4a, 0x90, 0xdf, 0xde, 0x69, 0x1e,
-	0x70, 0xae, 0x34, 0x7e, 0xe6, 0x49, 0x10, 0x45, 0x4e, 0xa9, 0x6d, 0x13, 0x4a, 0x6d, 0xd3, 0x64,
-	0x6d, 0x4b, 0xf9, 0xb5, 0x8d, 0x95, 0xb9, 0xad, 0xc6, 0xca, 0x5e, 0xa3, 0x32, 0xf9, 0xb4, 0x0c,
-	0x45, 0xee, 0xdf, 0x83, 0x81, 0x49, 0x4b, 0xed, 0x4f, 0x34, 0x00, 0x3f, 0x9a, 0x50, 0x1d, 0x72,
-	0x2d, 0xae, 0xa7, 0xaa, 0xb1, 0x64, 0x74, 0x39, 0x76, 0xc9, 0x74, 0xc9, 0x85, 0xde, 0x82, 0x9c,
-	0x33, 0x68, 0xb5, 0x88, 0x23, 0x4b, 0xde, 0xd5, 0x70, 0x3e, 0x14, 0xd9, 0x4a, 0x97, 0x7c, 0x74,
-	0xc8, 0x4b, 0xa3, 0xd3, 0x1d, 0xb0, 0x02, 0x38, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0xa7, 0x41, 0x41,
-	0xd9, 0xbc, 0xdf, 0x31, 0x09, 0x5f, 0x87, 0x3c, 0xb3, 0x81, 0xb4, 0x45, 0x1a, 0x9e, 0xd2, 0x7d,
-	0x02, 0xfa, 0x5b, 0xc8, 0xcb, 0x08, 0x90, 0x99, 0xb8, 0x1a, 0x2f, 0x76, 0xa7, 0xaf, 0xfb, 0xac,
-	0x78, 0x13, 0x2e, 0x31, 0xaf, 0xb4, 0xe8, 0xe1, 0x5a, 0xfa, 0x51, 0x3d, 0x7e, 0x6a, 0xa1, 0xe3,
-	0x67, 0x0d, 0xa6, 0xfa, 0xc7, 0x67, 0x4e, 0xa7, 0x65, 0x74, 0x85, 0x15, 0x5e, 0x1b, 0x7f, 0x00,
-	0x48, 0x15, 0x36, 0xce, 0x74, 0x71, 0x09, 0x0a, 0xeb, 0x86, 0x73, 0x2c, 0x4c, 0xc2, 0x8f, 0xa0,
-	0x44, 0x9b, 0x9b, 0x2f, 0x2e, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0xb1, 0x7c, 0x8e, 0x60, 0xf2,
-	0xd8, 0x70, 0x8e, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x0f, 0xa0, 0xd2, 0xe2, 0x93, 0x3c, 0x08,
-	0x5d, 0x19, 0xa6, 0x05, 0xdd, 0x3b, 0x09, 0x7e, 0x0c, 0x45, 0x3e, 0x87, 0xef, 0xdb, 0x08, 0x7c,
-	0x09, 0xa6, 0xf7, 0x4c, 0xa3, 0xef, 0x1c, 0x5b, 0xb2, 0xba, 0xd1, 0x49, 0x57, 0x7c, 0xda, 0x58,
-	0x1a, 0xef, 0xc3, 0xb4, 0x4d, 0x7a, 0x46, 0xc7, 0xec, 0x98, 0x47, 0x07, 0x87, 0x67, 0x2e, 0x71,
-	0xc4, 0x85, 0xa9, 0xec, 0x91, 0x9f, 0x52, 0x2a, 0x35, 0xed, 0xb0, 0x6b, 0x1d, 0x8a, 0x34, 0xc7,
-	0xbe, 0xf1, 0x17, 0x29, 0x28, 0x7e, 0x64, 0xb8, 0x2d, 0xb9, 0x74, 0x68, 0x03, 0xca, 0x5e, 0x72,
-	0x63, 0x14, 0x61, 0x4b, 0xa8, 0xc4, 0xb2, 0x31, 0xf2, 0x28, 0x2d, 0xab, 0x63, 0xa9, 0xa5, 0x12,
-	0x98, 0x28, 0xc3, 0x6c, 0x91, 0xae, 0x27, 0x2a, 0x95, 0x2c, 0x8a, 0x31, 0xaa, 0xa2, 0x54, 0x02,
-	0xda, 0x81, 0x4a, 0xdf, 0xb6, 0x8e, 0x6c, 0xe2, 0x38, 0x9e, 0x30, 0x5e, 0xc6, 0x70, 0x8c, 0xb0,
-	0x5d, 0xc1, 0xea, 0x8b, 0x9b, 0xee, 0x07, 0x49, 0x4f, 0xa7, 0xfd, 0xf3, 0x0c, 0x4f, 0x4e, 0xbf,
-	0x4e, 0x01, 0x8a, 0x4e, 0xea, 0xdb, 0x1e, 0xf1, 0xee, 0x42, 0xd9, 0x71, 0x0d, 0x3b, 0xb2, 0xd9,
-	0x4a, 0x8c, 0xea, 0x65, 0xfc, 0xfb, 0xe0, 0x19, 0x74, 0x60, 0x5a, 0x6e, 0xe7, 0xe5, 0x99, 0x38,
-	0x25, 0x97, 0x25, 0x79, 0x9b, 0x51, 0x51, 0x03, 0x72, 0x2f, 0x3b, 0x5d, 0x97, 0xd8, 0x4e, 0x35,
-	0xb3, 0x90, 0x5e, 0x2c, 0x2f, 0x3f, 0x3a, 0x6f, 0x19, 0x96, 0xde, 0x67, 0xfc, 0xcd, 0xb3, 0x3e,
-	0xd1, 0xe5, 0x58, 0xf5, 0xe4, 0x99, 0x0d, 0x9c, 0xc6, 0xe7, 0x60, 0xea, 0x15, 0x15, 0x41, 0x6f,
-	0xd9, 0x39, 0x7e, 0x58, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0x69, 0x1b, 0x47, 0x3d, 0x62, 0xba, 0xf2,
-	0x1e, 0x28, 0xdb, 0xf8, 0x2e, 0x80, 0xaf, 0x86, 0xa6, 0xfc, 0xed, 0x9d, 0xdd, 0xfd, 0x66, 0x65,
-	0x02, 0x15, 0x61, 0x6a, 0x7b, 0x67, 0xad, 0xb1, 0xd5, 0xa0, 0xf5, 0x01, 0xd7, 0xa5, 0x4b, 0x03,
-	0x6b, 0xa9, 0xea, 0xd4, 0x02, 0x3a, 0xf1, 0x15, 0x98, 0x8d, 0x5b, 0x40, 0x7a, 0x16, 0x2d, 0x89,
-	0x5d, 0x3a, 0x56, 0xa8, 0xa8, 0xaa, 0x53, 0xc1, 0xe9, 0x56, 0x21, 0xc7, 0x77, 0x6f, 0x5b, 0x1c,
-	0xce, 0x65, 0x93, 0x3a, 0x82, 0x6f, 0x46, 0xd2, 0x16, 0xab, 0xe4, 0xb5, 0x63, 0xd3, 0x4b, 0x26,
-	0x36, 0xbd, 0xa0, 0xdb, 0x50, 0xf2, 0xa2, 0xc1, 0x70, 0xc4, 0x59, 0x20, 0xaf, 0x17, 0xe5, 0x46,
-	0xa7, 0xb4, 0x80, 0xd3, 0x73, 0x41, 0xa7, 0xa3, 0xbb, 0x90, 0x25, 0x43, 0x62, 0xba, 0x4e, 0xb5,
-	0xc0, 0x2a, 0x46, 0x49, 0x9e, 0xdd, 0x1b, 0x94, 0xaa, 0x8b, 0x4e, 0xfc, 0x37, 0x70, 0x89, 0xdd,
-	0x91, 0x9e, 0xd9, 0x86, 0xa9, 0x5e, 0xe6, 0x9a, 0xcd, 0x2d, 0xe1, 0x6e, 0xfa, 0x89, 0xca, 0x90,
-	0xda, 0x58, 0x13, 0x4e, 0x48, 0x6d, 0xac, 0xe1, 0xcf, 0x34, 0x40, 0xea, 0xb8, 0xb1, 0xfc, 0x1c,
-	0x12, 0x2e, 0xd5, 0xa7, 0x7d, 0xf5, 0xb3, 0x90, 0x21, 0xb6, 0x6d, 0xd9, 0xcc, 0xa3, 0x79, 0x9d,
-	0x37, 0xf0, 0x1d, 0x61, 0x83, 0x4e, 0x86, 0xd6, 0x89, 0x17, 0x83, 0x5c, 0x9a, 0xe6, 0x99, 0xba,
-	0x09, 0x33, 0x01, 0xae, 0xb1, 0x2a, 0xd7, 0x7d, 0xb8, 0xcc, 0x84, 0x6d, 0x12, 0xd2, 0x5f, 0xe9,
-	0x76, 0x86, 0x89, 0x5a, 0xfb, 0x70, 0x25, 0xcc, 0xf8, 0xc3, 0xfa, 0x08, 0xff, 0xbd, 0xd0, 0xd8,
-	0xec, 0xf4, 0x48, 0xd3, 0xda, 0x4a, 0xb6, 0x8d, 0x66, 0xf6, 0x13, 0x72, 0xe6, 0x88, 0x12, 0xcf,
-	0xbe, 0xf1, 0x4f, 0x35, 0xb8, 0x1a, 0x19, 0xfe, 0x03, 0xaf, 0xea, 0x3c, 0xc0, 0x11, 0xdd, 0x3e,
-	0xa4, 0x4d, 0x3b, 0x38, 0xba, 0xa0, 0x50, 0x3c, 0x3b, 0x69, 0x2e, 0x2b, 0x0a, 0x3b, 0x67, 0xc5,
-	0x9a, 0xb3, 0x3f, 0x5e, 0xc4, 0xdf, 0x80, 0x02, 0x23, 0xec, 0xb9, 0x86, 0x3b, 0x70, 0x22, 0x8b,
-	0xf1, 0x6f, 0x62, 0x0b, 0xc8, 0x41, 0x63, 0xcd, 0xeb, 0x2d, 0xc8, 0xb2, 0x83, 0xb5, 0x3c, 0x56,
-	0x86, 0x6e, 0x32, 0x8a, 0x1d, 0xba, 0x60, 0xc4, 0xc7, 0x90, 0x7d, 0xce, 0xd0, 0x48, 0xc5, 0xb2,
-	0x49, 0xb9, 0x14, 0xa6, 0xd1, 0xe3, 0x18, 0x49, 0x5e, 0x67, 0xdf, 0xec, 0x14, 0x46, 0x88, 0xbd,
-	0xaf, 0x6f, 0xf1, 0xd3, 0x5e, 0x5e, 0xf7, 0xda, 0xd4, 0x65, 0xad, 0x6e, 0x87, 0x98, 0x2e, 0xeb,
-	0x9d, 0x64, 0xbd, 0x0a, 0x05, 0x2f, 0x41, 0x85, 0x6b, 0x5a, 0x69, 0xb7, 0x95, 0xd3, 0x94, 0x27,
-	0x4f, 0x0b, 0xca, 0xc3, 0x5f, 0x69, 0x70, 0x49, 0x19, 0x30, 0x96, 0x63, 0x1e, 0x43, 0x96, 0x63,
-	0xae, 0xa2, 0x70, 0xcf, 0x06, 0x47, 0x71, 0x35, 0xba, 0xe0, 0x41, 0x4b, 0x90, 0xe3, 0x5f, 0xf2,
-	0x48, 0x1b, 0xcf, 0x2e, 0x99, 0xf0, 0x5d, 0x98, 0x11, 0x24, 0xd2, 0xb3, 0xe2, 0xf6, 0x36, 0x73,
-	0x28, 0xfe, 0x14, 0x66, 0x83, 0x6c, 0x63, 0x4d, 0x49, 0x31, 0x32, 0x75, 0x11, 0x23, 0x57, 0xa4,
-	0x91, 0xfb, 0xfd, 0xb6, 0x72, 0x2c, 0x08, 0xaf, 0xba, 0xba, 0x22, 0xa9, 0xd0, 0x8a, 0x78, 0x13,
-	0x90, 0x22, 0x7e, 0xd4, 0x09, 0xcc, 0xc8, 0xed, 0xb0, 0xd5, 0x71, 0xbc, 0xd3, 0xe7, 0x6b, 0x40,
-	0x2a, 0xf1, 0xc7, 0x36, 0x68, 0x8d, 0xc8, 0xa2, 0x26, 0x0d, 0xfa, 0x00, 0x90, 0x4a, 0x1c, 0x2b,
-	0xa3, 0xd7, 0xe1, 0xd2, 0x73, 0x6b, 0x48, 0x53, 0x03, 0xa5, 0xfa, 0x21, 0xc3, 0xef, 0xa2, 0xde,
-	0xb2, 0x79, 0x6d, 0xaa, 0x5c, 0x1d, 0x30, 0x96, 0xf2, 0x5f, 0x6a, 0x50, 0x5c, 0xe9, 0x1a, 0x76,
-	0x4f, 0x2a, 0x7e, 0x17, 0xb2, 0xfc, 0x86, 0x25, 0x40, 0x8d, 0x7b, 0x41, 0x31, 0x2a, 0x2f, 0x6f,
-	0xac, 0xf0, 0xfb, 0x98, 0x18, 0x45, 0x0d, 0x17, 0xef, 0x1e, 0x6b, 0xa1, 0x77, 0x90, 0x35, 0xf4,
-	0x06, 0x64, 0x0c, 0x3a, 0x84, 0xa5, 0xe0, 0x72, 0xf8, 0x6e, 0xcb, 0xa4, 0xb1, 0x73, 0x20, 0xe7,
-	0xc2, 0xef, 0x40, 0x41, 0xd1, 0x40, 0x6f, 0xef, 0xcf, 0x1a, 0xe2, 0xd0, 0xb6, 0xb2, 0xda, 0xdc,
-	0x78, 0xc1, 0x2f, 0xf5, 0x65, 0x80, 0xb5, 0x86, 0xd7, 0x4e, 0xe1, 0x8f, 0xc5, 0x28, 0x91, 0xef,
-	0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x17, 0xb2, 0xe7, 0x14, 0x4a, 0x62, 0xfa, 0xe3, 0xa6, 0x6f,
-	0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0x4f, 0x43, 0x49, 0x24, 0x74, 0xb1, 0xff,
-	0x7e, 0xa1, 0x41, 0x59, 0x52, 0xc6, 0x05, 0x5f, 0x25, 0x6e, 0xc4, 0x2b, 0x80, 0x87, 0x1a, 0x5d,
-	0x81, 0x6c, 0xfb, 0x70, 0xaf, 0xf3, 0x5a, 0x02, 0xe5, 0xa2, 0x45, 0xe9, 0x5d, 0xae, 0x87, 0xbf,
-	0x56, 0x89, 0x16, 0xba, 0xce, 0x1f, 0xb2, 0x36, 0xcc, 0x36, 0x39, 0x65, 0x67, 0xca, 0x49, 0xdd,
-	0x27, 0xb0, 0x0b, 0xb5, 0x78, 0xd5, 0x62, 0x07, 0x49, 0xf5, 0x95, 0x6b, 0x06, 0x2e, 0xad, 0x0c,
-	0xdc, 0xe3, 0x86, 0x69, 0x1c, 0x76, 0x65, 0xc6, 0xa2, 0x65, 0x96, 0x12, 0xd7, 0x3a, 0x8e, 0x4a,
-	0x6d, 0xc0, 0x0c, 0xa5, 0x12, 0xd3, 0xed, 0xb4, 0x94, 0xf4, 0x26, 0x8b, 0x98, 0x16, 0x2a, 0x62,
-	0x86, 0xe3, 0xbc, 0xb2, 0xec, 0xb6, 0x98, 0x9a, 0xd7, 0xc6, 0x6b, 0x5c, 0xf8, 0xbe, 0x13, 0x28,
-	0x53, 0xdf, 0x56, 0xca, 0xa2, 0x2f, 0xe5, 0x19, 0x71, 0x47, 0x48, 0xc1, 0x8f, 0xe0, 0xb2, 0xe4,
-	0x14, 0xc0, 0xe4, 0x08, 0xe6, 0x1d, 0xb8, 0x21, 0x99, 0x57, 0x8f, 0xe9, 0x45, 0x6d, 0x57, 0x28,
-	0xfc, 0xae, 0x76, 0x3e, 0x85, 0xaa, 0x67, 0x27, 0x3b, 0x2c, 0x5b, 0x5d, 0xd5, 0x80, 0x81, 0x23,
-	0xf6, 0x4c, 0x5e, 0x67, 0xdf, 0x94, 0x66, 0x5b, 0x5d, 0xef, 0x48, 0x40, 0xbf, 0xf1, 0x2a, 0xcc,
-	0x49, 0x19, 0xe2, 0x18, 0x1b, 0x14, 0x12, 0x31, 0x28, 0x4e, 0x88, 0x70, 0x18, 0x1d, 0x3a, 0xda,
-	0xed, 0x2a, 0x67, 0xd0, 0xb5, 0x4c, 0xa6, 0xa6, 0xc8, 0xbc, 0xcc, 0x77, 0x04, 0x35, 0x4c, 0xad,
-	0x18, 0x82, 0x4c, 0x05, 0xa8, 0x64, 0xb1, 0x10, 0x94, 0x1c, 0x59, 0x88, 0x88, 0xe8, 0x4f, 0x60,
-	0xde, 0x33, 0x82, 0xfa, 0x6d, 0x97, 0xd8, 0xbd, 0x8e, 0xe3, 0x28, 0x50, 0x56, 0xdc, 0xc4, 0xef,
-	0xc1, 0x64, 0x9f, 0x88, 0x9c, 0x52, 0x58, 0x46, 0x4b, 0xfc, 0xed, 0x79, 0x49, 0x19, 0xcc, 0xfa,
-	0x71, 0x1b, 0x6e, 0x4a, 0xe9, 0xdc, 0xa3, 0xb1, 0xe2, 0xc3, 0x46, 0xc9, 0x0b, 0x3e, 0x77, 0x6b,
-	0xf4, 0x82, 0x9f, 0xe6, 0x6b, 0xef, 0xc1, 0xab, 0x1f, 0x70, 0x47, 0xca, 0xd8, 0x1a, 0xab, 0x56,
-	0x6c, 0x72, 0x9f, 0x7a, 0x21, 0x39, 0x96, 0xb0, 0x43, 0x98, 0x0d, 0x46, 0xf2, 0x58, 0x69, 0x6c,
-	0x16, 0x32, 0xae, 0x75, 0x42, 0x64, 0x12, 0xe3, 0x0d, 0x69, 0xb0, 0x17, 0xe6, 0x63, 0x19, 0x6c,
-	0xf8, 0xc2, 0xd8, 0x96, 0x1c, 0xd7, 0x5e, 0xba, 0x9a, 0xf2, 0xf0, 0xc5, 0x1b, 0x78, 0x1b, 0xae,
-	0x84, 0xd3, 0xc4, 0x58, 0x26, 0xbf, 0xe0, 0x1b, 0x38, 0x2e, 0x93, 0x8c, 0x25, 0xf7, 0x43, 0x3f,
-	0x19, 0x28, 0x09, 0x65, 0x2c, 0x91, 0x3a, 0xd4, 0xe2, 0xf2, 0xcb, 0xf7, 0xb1, 0x5f, 0xbd, 0x74,
-	0x33, 0x96, 0x30, 0xc7, 0x17, 0x36, 0xfe, 0xf2, 0xfb, 0x39, 0x22, 0x3d, 0x32, 0x47, 0x88, 0x20,
-	0xf1, 0xb3, 0xd8, 0x0f, 0xb0, 0xe9, 0x84, 0x0e, 0x3f, 0x81, 0x8e, 0xab, 0x83, 0xd6, 0x10, 0x4f,
-	0x07, 0x6b, 0xc8, 0x8d, 0xad, 0xa6, 0xdd, 0xb1, 0x16, 0xe3, 0x23, 0x3f, 0x77, 0x46, 0x32, 0xf3,
-	0x58, 0x82, 0x3f, 0x86, 0x85, 0xe4, 0xa4, 0x3c, 0x8e, 0xe4, 0x87, 0x75, 0xc8, 0x7b, 0x07, 0x4a,
-	0xe5, 0x77, 0x1b, 0x05, 0xc8, 0x6d, 0xef, 0xec, 0xed, 0xae, 0xac, 0x36, 0xf8, 0x0f, 0x37, 0x56,
-	0x77, 0x74, 0x7d, 0x7f, 0xb7, 0x59, 0x49, 0x2d, 0xff, 0x29, 0x0d, 0xa9, 0xcd, 0x17, 0xe8, 0x1f,
-	0x21, 0xc3, 0x5f, 0x31, 0x47, 0x3c, 0x5d, 0xd7, 0x46, 0x3d, 0xd4, 0xe2, 0x6b, 0x9f, 0xfd, 0xe6,
-	0xf7, 0x5f, 0xa6, 0x2e, 0xe3, 0x4a, 0x7d, 0xf8, 0xf6, 0x21, 0x71, 0x8d, 0xfa, 0xc9, 0xb0, 0xce,
-	0xea, 0xc3, 0x13, 0xed, 0x21, 0xda, 0x87, 0xf4, 0xee, 0xc0, 0x45, 0x89, 0xcf, 0xda, 0xb5, 0xe4,
-	0xf7, 0x5b, 0x3c, 0xc7, 0x04, 0xcf, 0xe0, 0xb2, 0x22, 0xb8, 0x3f, 0x70, 0xa9, 0xd8, 0x01, 0x14,
-	0xd4, 0x17, 0xd8, 0x73, 0xdf, 0xbb, 0x6b, 0xe7, 0xbf, 0xee, 0xe2, 0x5b, 0x4c, 0xdd, 0x35, 0x7c,
-	0x45, 0x51, 0xc7, 0xdf, 0x89, 0xd5, 0xd9, 0x34, 0x4f, 0x4d, 0x94, 0xf8, 0x22, 0x5e, 0x4b, 0x7e,
-	0xf4, 0x8d, 0x9d, 0x8d, 0x7b, 0x6a, 0x52, 0xb1, 0xa6, 0x78, 0xf3, 0x6d, 0xb9, 0xe8, 0x66, 0xcc,
-	0x9b, 0x9f, 0xfa, 0xba, 0x55, 0x5b, 0x48, 0x66, 0x10, 0x8a, 0x16, 0x98, 0xa2, 0x1a, 0xbe, 0xac,
-	0x28, 0x6a, 0x79, 0x6c, 0x4f, 0xb4, 0x87, 0xcb, 0x47, 0x90, 0x61, 0xe8, 0x31, 0xfa, 0x27, 0xf9,
-	0x51, 0x8b, 0x81, 0xd1, 0x13, 0x16, 0x3f, 0x80, 0x3b, 0xe3, 0x2a, 0x53, 0x86, 0x70, 0x49, 0x2a,
-	0x63, 0xf8, 0xf1, 0x13, 0xed, 0xe1, 0xa2, 0xf6, 0xa6, 0xb6, 0xfc, 0xc7, 0x49, 0xc8, 0x30, 0xb8,
-	0x08, 0x59, 0x00, 0x3e, 0x9a, 0x1a, 0x9e, 0x65, 0x04, 0x9f, 0x0d, 0xcf, 0x32, 0x0a, 0xc4, 0xe2,
-	0x79, 0xa6, 0xb8, 0x8a, 0x67, 0xa4, 0x62, 0x86, 0x44, 0xd5, 0x19, 0xb8, 0x46, 0x7d, 0x3a, 0x14,
-	0x80, 0x19, 0x0f, 0x33, 0x14, 0x27, 0x30, 0x80, 0xaa, 0x86, 0x77, 0x48, 0x0c, 0xa2, 0x8a, 0x31,
-	0xd3, 0x79, 0x1d, 0x5f, 0x55, 0x3c, 0xcb, 0xd5, 0xda, 0x8c, 0x91, 0xea, 0xfd, 0x0f, 0x0d, 0xca,
-	0x41, 0x5c, 0x14, 0xdd, 0x8e, 0x91, 0x1c, 0x86, 0x57, 0x6b, 0x77, 0x46, 0x33, 0x25, 0x59, 0xc0,
-	0xd5, 0x9f, 0x10, 0xd2, 0x37, 0x28, 0xa3, 0x70, 0x3c, 0xfa, 0x42, 0x83, 0xe9, 0x10, 0xd8, 0x89,
-	0xe2, 0x34, 0x44, 0xa0, 0xd4, 0xda, 0xdd, 0x73, 0xb8, 0x84, 0x21, 0xf7, 0x98, 0x21, 0x0b, 0xf8,
-	0x5a, 0xc4, 0x15, 0x6e, 0xa7, 0x47, 0x5c, 0x4b, 0x18, 0xe3, 0x2d, 0x03, 0x07, 0x26, 0x63, 0x97,
-	0x21, 0x00, 0x74, 0xc6, 0x2e, 0x43, 0x10, 0xd5, 0x1c, 0xb1, 0x0c, 0x1c, 0x8d, 0xa4, 0x5b, 0xfc,
-	0xcf, 0x69, 0xc8, 0xad, 0xf2, 0x5f, 0x4f, 0x22, 0x07, 0xf2, 0x1e, 0x02, 0x88, 0xe6, 0xe3, 0xd0,
-	0x18, 0xff, 0xb6, 0x50, 0xbb, 0x99, 0xd8, 0x2f, 0xb4, 0xdf, 0x65, 0xda, 0x6f, 0xe2, 0x9a, 0xd4,
-	0x2e, 0x7e, 0xa4, 0x59, 0xe7, 0xd7, 0xfe, 0xba, 0xd1, 0x6e, 0xd3, 0x89, 0xff, 0x3b, 0x14, 0x55,
-	0x98, 0x0e, 0xdd, 0x8a, 0x45, 0x81, 0x54, 0xa4, 0xaf, 0x86, 0x47, 0xb1, 0x08, 0xed, 0x8b, 0x4c,
-	0x3b, 0xc6, 0x37, 0x12, 0xb4, 0xdb, 0x8c, 0x3d, 0x60, 0x00, 0x87, 0xd9, 0xe2, 0x0d, 0x08, 0xa0,
-	0x78, 0xf1, 0x06, 0x04, 0x51, 0xba, 0x73, 0x0d, 0x18, 0x30, 0x76, 0x6a, 0xc0, 0x2b, 0x00, 0x1f,
-	0x54, 0x43, 0xb1, 0x7e, 0x55, 0xae, 0x4e, 0xe1, 0x90, 0x8f, 0xe2, 0x71, 0xd1, 0x3d, 0x17, 0x52,
-	0xdd, 0xed, 0x38, 0x34, 0xf4, 0x97, 0xbf, 0xca, 0x42, 0xe1, 0xb9, 0xd1, 0x31, 0x5d, 0x62, 0x1a,
-	0x66, 0x8b, 0xa0, 0x97, 0x90, 0x61, 0xa5, 0x31, 0x9c, 0xe5, 0x54, 0xac, 0x29, 0x9c, 0xe5, 0x02,
-	0x40, 0x0c, 0xbe, 0xc3, 0x34, 0xcf, 0xe3, 0x39, 0xa9, 0xb9, 0xe7, 0x8b, 0xaf, 0x33, 0x0c, 0x85,
-	0x4e, 0xf8, 0x9f, 0x21, 0x2b, 0xe0, 0xf9, 0x90, 0xb0, 0x00, 0xb6, 0x52, 0xbb, 0x1e, 0xdf, 0x99,
-	0xb4, 0xbd, 0x54, 0x55, 0x0e, 0xe3, 0xa5, 0xba, 0x5e, 0x03, 0xf8, 0x00, 0x61, 0xd8, 0xb9, 0x11,
-	0x3c, 0xb1, 0xb6, 0x90, 0xcc, 0x20, 0xf4, 0x3e, 0x60, 0x7a, 0x6f, 0xe3, 0xf9, 0x38, 0xbd, 0x6d,
-	0x8f, 0x9f, 0xea, 0x3e, 0x84, 0xc9, 0x75, 0xc3, 0x39, 0x46, 0xa1, 0x62, 0xa7, 0xfc, 0xe0, 0xa1,
-	0x56, 0x8b, 0xeb, 0x12, 0x9a, 0x6e, 0x33, 0x4d, 0x37, 0x70, 0x35, 0x4e, 0xd3, 0xb1, 0xe1, 0xd0,
-	0xea, 0x81, 0x8e, 0x21, 0xcb, 0x7f, 0x03, 0x11, 0xf6, 0x65, 0xe0, 0x77, 0x14, 0x61, 0x5f, 0x06,
-	0x7f, 0x36, 0x71, 0x31, 0x4d, 0x2e, 0x4c, 0xc9, 0x1f, 0x1e, 0xa0, 0x1b, 0xa1, 0xa5, 0x09, 0xfe,
-	0x48, 0xa1, 0x36, 0x9f, 0xd4, 0x2d, 0xf4, 0xdd, 0x67, 0xfa, 0x6e, 0xe1, 0xeb, 0xb1, 0x6b, 0x27,
-	0xb8, 0x9f, 0x68, 0x0f, 0xdf, 0xd4, 0x68, 0x99, 0x00, 0x1f, 0x64, 0x8d, 0x44, 0x47, 0x18, 0xaf,
-	0x8d, 0x44, 0x47, 0x04, 0x9f, 0xc5, 0xcb, 0x4c, 0xf9, 0x63, 0x7c, 0x3f, 0x4e, 0xb9, 0x6b, 0x1b,
-	0xa6, 0xf3, 0x92, 0xd8, 0x6f, 0x70, 0x30, 0xcd, 0x39, 0xee, 0xf4, 0x69, 0xa4, 0xfc, 0x65, 0x1a,
-	0x26, 0xe9, 0x79, 0x94, 0x96, 0x67, 0xff, 0x1a, 0x1f, 0xb6, 0x26, 0x02, 0x9e, 0x85, 0xad, 0x89,
-	0x22, 0x00, 0xd1, 0xf2, 0xcc, 0x7e, 0x27, 0x4f, 0x18, 0x13, 0xf5, 0xba, 0x03, 0x05, 0xe5, 0xae,
-	0x8f, 0x62, 0x04, 0x06, 0x91, 0xb9, 0x70, 0x5d, 0x88, 0x01, 0x0a, 0xf0, 0x4d, 0xa6, 0x73, 0x0e,
-	0xcf, 0x06, 0x74, 0xb6, 0x39, 0x17, 0x55, 0xfa, 0xaf, 0x50, 0x54, 0x31, 0x01, 0x14, 0x23, 0x33,
-	0x84, 0xfc, 0x85, 0x53, 0x62, 0x1c, 0xa4, 0x10, 0xcd, 0x0e, 0xde, 0xff, 0x09, 0x90, 0xac, 0x54,
-	0x79, 0x1f, 0x72, 0x02, 0x28, 0x88, 0x9b, 0x6d, 0x10, 0x2a, 0x8c, 0x9b, 0x6d, 0x08, 0x65, 0x88,
-	0x1e, 0xf3, 0x98, 0x56, 0x7a, 0x1f, 0x92, 0x25, 0x48, 0x68, 0x7c, 0x46, 0xdc, 0x24, 0x8d, 0x3e,
-	0xf6, 0x95, 0xa4, 0x51, 0xb9, 0x8b, 0x8e, 0xd2, 0x78, 0x44, 0x5c, 0x11, 0x4b, 0xf2, 0x9e, 0x87,
-	0x12, 0x04, 0xaa, 0x29, 0x1f, 0x8f, 0x62, 0x49, 0x3a, 0x95, 0xfb, 0x4a, 0x45, 0xbe, 0x47, 0x9f,
-	0x02, 0xf8, 0x90, 0x46, 0xf8, 0xb4, 0x15, 0x8b, 0x8b, 0x86, 0x4f, 0x5b, 0xf1, 0xa8, 0x48, 0x34,
-	0x7f, 0xf8, 0xba, 0xf9, 0xc5, 0x80, 0x6a, 0xff, 0x1f, 0x0d, 0x50, 0x14, 0x01, 0x41, 0x8f, 0xe2,
-	0x35, 0xc4, 0x22, 0xae, 0xb5, 0xc7, 0x17, 0x63, 0x4e, 0x2a, 0x11, 0xbe, 0x59, 0x2d, 0x36, 0xa2,
-	0xff, 0x8a, 0x1a, 0xf6, 0xb9, 0x06, 0xa5, 0x00, 0x84, 0x82, 0xee, 0x25, 0xac, 0x71, 0x08, 0xb4,
-	0xad, 0xdd, 0x3f, 0x97, 0x2f, 0xe9, 0x24, 0xa6, 0xec, 0x08, 0x79, 0x10, 0xff, 0x2f, 0x0d, 0xca,
-	0x41, 0xd8, 0x05, 0x25, 0xc8, 0x8f, 0x00, 0xbf, 0xb5, 0xc5, 0xf3, 0x19, 0xcf, 0x5f, 0x2a, 0xff,
-	0x6c, 0xde, 0x87, 0x9c, 0x00, 0x6b, 0xe2, 0x02, 0x22, 0x08, 0x1b, 0xc7, 0x05, 0x44, 0x08, 0xe9,
-	0x49, 0x08, 0x08, 0xdb, 0xea, 0x12, 0x25, 0x04, 0x05, 0xa2, 0x93, 0xa4, 0x71, 0x74, 0x08, 0x86,
-	0xe0, 0xa0, 0x51, 0x1a, 0xfd, 0x10, 0x94, 0x70, 0x0e, 0x4a, 0x10, 0x78, 0x4e, 0x08, 0x86, 0xd1,
-	0xa0, 0x84, 0x10, 0x64, 0x4a, 0x95, 0x10, 0xf4, 0xc1, 0x97, 0xb8, 0x10, 0x8c, 0x20, 0xe2, 0x71,
-	0x21, 0x18, 0xc5, 0x6f, 0x12, 0xd6, 0x95, 0xe9, 0x0e, 0x84, 0xe0, 0x4c, 0x0c, 0x56, 0x83, 0x1e,
-	0x27, 0x38, 0x34, 0x16, 0x6c, 0xaf, 0xbd, 0x71, 0x41, 0xee, 0x91, 0x7b, 0x9f, 0x2f, 0x85, 0xdc,
-	0xfb, 0xff, 0xaf, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74, 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0x8b,
-	0xb2, 0x9f, 0xef, 0x35, 0x2f, 0x1a, 0x9e, 0x56, 0x7e, 0xfe, 0xcd, 0xbc, 0xf6, 0xab, 0x6f, 0xe6,
-	0xb5, 0xdf, 0x7e, 0x33, 0xaf, 0xfd, 0xef, 0xef, 0xe6, 0x27, 0x0e, 0xb3, 0xec, 0x7f, 0xa7, 0xbd,
-	0xfd, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x77, 0x6b, 0x63, 0x24, 0x37, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// KVClient is the client API for KV service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type KVClient interface {
-	// Range gets the keys in the range from the key-value store.
-	Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
-	// Put puts the given key into the key-value store.
-	// A put request increments the revision of the key-value store
-	// and generates one event in the event history.
-	Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
-	// DeleteRange deletes the given range from the key-value store.
-	// A delete request increments the revision of the key-value store
-	// and generates a delete event in the event history for every deleted key.
-	DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
-	// Txn processes multiple requests in a single transaction.
-	// A txn request increments the revision of the key-value store
-	// and generates events with the same revision for every completed request.
-	// It is not allowed to modify the same key several times within one txn.
-	Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
-	// Compact compacts the event history in the etcd key-value store. The key-value
-	// store should be periodically compacted or the event history will continue to grow
-	// indefinitely.
-	Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
-}
-
-type kVClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewKVClient(cc *grpc.ClientConn) KVClient {
-	return &kVClient{cc}
-}
-
-func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
-	out := new(RangeResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
-	out := new(PutResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
-	out := new(DeleteRangeResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
-	out := new(TxnResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
-	out := new(CompactionResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// KVServer is the server API for KV service.
-type KVServer interface {
-	// Range gets the keys in the range from the key-value store.
-	Range(context.Context, *RangeRequest) (*RangeResponse, error)
-	// Put puts the given key into the key-value store.
-	// A put request increments the revision of the key-value store
-	// and generates one event in the event history.
-	Put(context.Context, *PutRequest) (*PutResponse, error)
-	// DeleteRange deletes the given range from the key-value store.
-	// A delete request increments the revision of the key-value store
-	// and generates a delete event in the event history for every deleted key.
-	DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
-	// Txn processes multiple requests in a single transaction.
-	// A txn request increments the revision of the key-value store
-	// and generates events with the same revision for every completed request.
-	// It is not allowed to modify the same key several times within one txn.
-	Txn(context.Context, *TxnRequest) (*TxnResponse, error)
-	// Compact compacts the event history in the etcd key-value store. The key-value
-	// store should be periodically compacted or the event history will continue to grow
-	// indefinitely.
-	Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
-}
-
-// UnimplementedKVServer can be embedded to have forward compatible implementations.
-type UnimplementedKVServer struct {
-}
-
-func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
-}
-func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
-}
-func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented")
-}
-func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented")
-}
-func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented")
-}
-
-func RegisterKVServer(s *grpc.Server, srv KVServer) {
-	s.RegisterService(&_KV_serviceDesc, srv)
-}
-
-func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(RangeRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).Range(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/Range",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).Range(ctx, req.(*RangeRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(PutRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).Put(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/Put",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).Put(ctx, req.(*PutRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteRangeRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).DeleteRange(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/DeleteRange",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(TxnRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).Txn(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/Txn",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CompactionRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).Compact(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/Compact",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _KV_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.KV",
-	HandlerType: (*KVServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "Range",
-			Handler:    _KV_Range_Handler,
-		},
-		{
-			MethodName: "Put",
-			Handler:    _KV_Put_Handler,
-		},
-		{
-			MethodName: "DeleteRange",
-			Handler:    _KV_DeleteRange_Handler,
-		},
-		{
-			MethodName: "Txn",
-			Handler:    _KV_Txn_Handler,
-		},
-		{
-			MethodName: "Compact",
-			Handler:    _KV_Compact_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "rpc.proto",
-}
-
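For reviewers of this removal, a minimal usage sketch of the generated KV client above follows. It is illustrative only and not part of the vendored file; the import path, the endpoint, and the message field names (Key, Value, Kvs) are assumptions based on the etcdserverpb definitions rather than anything shown in this diff.

// kv_usage_sketch.go — illustrative only, not part of the vendored package.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb" // assumed import path
)

func main() {
	// Dial an etcd gRPC endpoint (insecure, for illustration only).
	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	kv := pb.NewKVClient(conn)

	// Put a key, then read it back with a single-key Range.
	if _, err := kv.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		log.Fatal(err)
	}
	resp, err := kv.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d kvs", len(resp.Kvs))
}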
-// WatchClient is the client API for Watch service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type WatchClient interface {
-	// Watch watches for events happening or that have happened. Both input and output
-	// are streams; the input stream is for creating and canceling watchers and the output
-	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
-	// for several watches at once. The entire event history can be watched starting from the
-	// last compaction revision.
-	Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
-}
-
-type watchClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewWatchClient(cc *grpc.ClientConn) WatchClient {
-	return &watchClient{cc}
-}
-
-func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &watchWatchClient{stream}
-	return x, nil
-}
-
-type Watch_WatchClient interface {
-	Send(*WatchRequest) error
-	Recv() (*WatchResponse, error)
-	grpc.ClientStream
-}
-
-type watchWatchClient struct {
-	grpc.ClientStream
-}
-
-func (x *watchWatchClient) Send(m *WatchRequest) error {
-	return x.ClientStream.SendMsg(m)
-}
-
-func (x *watchWatchClient) Recv() (*WatchResponse, error) {
-	m := new(WatchResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-// WatchServer is the server API for Watch service.
-type WatchServer interface {
-	// Watch watches for events happening or that have happened. Both input and output
-	// are streams; the input stream is for creating and canceling watchers and the output
-	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
-	// for several watches at once. The entire event history can be watched starting from the
-	// last compaction revision.
-	Watch(Watch_WatchServer) error
-}
-
-// UnimplementedWatchServer can be embedded to have forward compatible implementations.
-type UnimplementedWatchServer struct {
-}
-
-func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error {
-	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
-}
-
-func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
-	s.RegisterService(&_Watch_serviceDesc, srv)
-}
-
-func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
-	return srv.(WatchServer).Watch(&watchWatchServer{stream})
-}
-
-type Watch_WatchServer interface {
-	Send(*WatchResponse) error
-	Recv() (*WatchRequest, error)
-	grpc.ServerStream
-}
-
-type watchWatchServer struct {
-	grpc.ServerStream
-}
-
-func (x *watchWatchServer) Send(m *WatchResponse) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func (x *watchWatchServer) Recv() (*WatchRequest, error) {
-	m := new(WatchRequest)
-	if err := x.ServerStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-var _Watch_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Watch",
-	HandlerType: (*WatchServer)(nil),
-	Methods:     []grpc.MethodDesc{},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "Watch",
-			Handler:       _Watch_Watch_Handler,
-			ServerStreams: true,
-			ClientStreams: true,
-		},
-	},
-	Metadata: "rpc.proto",
-}
-
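A similar sketch for the bidirectional Watch stream defined above: the client sends a create request and then receives event batches. The oneof wrapper types (RequestUnion, WatchRequest_CreateRequest) and the Events field are assumptions taken from the etcdserverpb message definitions, which are not part of this excerpt.

// Continuation of the sketch above (same imports). Illustrative only.
func watchFoo(ctx context.Context, conn *grpc.ClientConn) error {
	wc := pb.NewWatchClient(conn)
	stream, err := wc.Watch(ctx)
	if err != nil {
		return err
	}
	// Register a watcher on the key "foo".
	create := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")},
	}}
	if err := stream.Send(create); err != nil {
		return err
	}
	// Each WatchResponse carries zero or more events for the registered watchers.
	for {
		wr, err := stream.Recv()
		if err != nil {
			return err
		}
		log.Printf("watch: %d events", len(wr.Events))
	}
}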
-// LeaseClient is the client API for Lease service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type LeaseClient interface {
-	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
-	// within a given time to live period. All keys attached to the lease will be expired and
-	// deleted if the lease expires. Each expired key generates a delete event in the event history.
-	LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
-	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
-	LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
-	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
-	// to the server and streaming keep alive responses from the server to the client.
-	LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
-	// LeaseTimeToLive retrieves lease information.
-	LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
-	// LeaseLeases lists all existing leases.
-	LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
-}
-
-type leaseClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
-	return &leaseClient{cc}
-}
-
-func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
-	out := new(LeaseGrantResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
-	out := new(LeaseRevokeResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &leaseLeaseKeepAliveClient{stream}
-	return x, nil
-}
-
-type Lease_LeaseKeepAliveClient interface {
-	Send(*LeaseKeepAliveRequest) error
-	Recv() (*LeaseKeepAliveResponse, error)
-	grpc.ClientStream
-}
-
-type leaseLeaseKeepAliveClient struct {
-	grpc.ClientStream
-}
-
-func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
-	return x.ClientStream.SendMsg(m)
-}
-
-func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
-	m := new(LeaseKeepAliveResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
-	out := new(LeaseTimeToLiveResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
-	out := new(LeaseLeasesResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// LeaseServer is the server API for Lease service.
-type LeaseServer interface {
-	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
-	// within a given time to live period. All keys attached to the lease will be expired and
-	// deleted if the lease expires. Each expired key generates a delete event in the event history.
-	LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
-	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
-	LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
-	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
-	// to the server and streaming keep alive responses from the server to the client.
-	LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
-	// LeaseTimeToLive retrieves lease information.
-	LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
-	// LeaseLeases lists all existing leases.
-	LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
-}
-
-// UnimplementedLeaseServer can be embedded to have forward compatible implementations.
-type UnimplementedLeaseServer struct {
-}
-
-func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error {
-	return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented")
-}
-
-func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
-	s.RegisterService(&_Lease_serviceDesc, srv)
-}
-
-func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LeaseGrantRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(LeaseServer).LeaseGrant(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Lease/LeaseGrant",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LeaseRevokeRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(LeaseServer).LeaseRevoke(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
-	return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
-}
-
-type Lease_LeaseKeepAliveServer interface {
-	Send(*LeaseKeepAliveResponse) error
-	Recv() (*LeaseKeepAliveRequest, error)
-	grpc.ServerStream
-}
-
-type leaseLeaseKeepAliveServer struct {
-	grpc.ServerStream
-}
-
-func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
-	m := new(LeaseKeepAliveRequest)
-	if err := x.ServerStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LeaseTimeToLiveRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LeaseLeasesRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(LeaseServer).LeaseLeases(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Lease/LeaseLeases",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Lease_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Lease",
-	HandlerType: (*LeaseServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "LeaseGrant",
-			Handler:    _Lease_LeaseGrant_Handler,
-		},
-		{
-			MethodName: "LeaseRevoke",
-			Handler:    _Lease_LeaseRevoke_Handler,
-		},
-		{
-			MethodName: "LeaseTimeToLive",
-			Handler:    _Lease_LeaseTimeToLive_Handler,
-		},
-		{
-			MethodName: "LeaseLeases",
-			Handler:    _Lease_LeaseLeases_Handler,
-		},
-	},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "LeaseKeepAlive",
-			Handler:       _Lease_LeaseKeepAlive_Handler,
-			ServerStreams: true,
-			ClientStreams: true,
-		},
-	},
-	Metadata: "rpc.proto",
-}
-
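Lease pairs unary calls (LeaseGrant, LeaseRevoke) with the bidirectional LeaseKeepAlive stream shown above. A hedged sketch follows; the TTL and ID field names are assumptions from the etcdserverpb definitions.

// Continuation of the same sketch file (same imports). Illustrative only.
func keepLeaseAlive(ctx context.Context, conn *grpc.ClientConn) error {
	lc := pb.NewLeaseClient(conn)
	// Grant a lease with a 10 second TTL.
	grant, err := lc.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 10})
	if err != nil {
		return err
	}
	ka, err := lc.LeaseKeepAlive(ctx)
	if err != nil {
		return err
	}
	// Send one keep-alive for the granted lease and read the refreshed TTL back.
	if err := ka.Send(&pb.LeaseKeepAliveRequest{ID: grant.ID}); err != nil {
		return err
	}
	resp, err := ka.Recv()
	if err != nil {
		return err
	}
	log.Printf("lease %d refreshed, TTL=%d", resp.ID, resp.TTL)
	return nil
}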
-// ClusterClient is the client API for Cluster service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ClusterClient interface {
-	// MemberAdd adds a member into the cluster.
-	MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
-	// MemberRemove removes an existing member from the cluster.
-	MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
-	// MemberUpdate updates the member configuration.
-	MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
-	// MemberList lists all the members in the cluster.
-	MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
-}
-
-type clusterClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
-	return &clusterClient{cc}
-}
-
-func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
-	out := new(MemberAddResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
-	out := new(MemberRemoveResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
-	out := new(MemberUpdateResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
-	out := new(MemberListResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// ClusterServer is the server API for Cluster service.
-type ClusterServer interface {
-	// MemberAdd adds a member into the cluster.
-	MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
-	// MemberRemove removes an existing member from the cluster.
-	MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
-	// MemberUpdate updates the member configuration.
-	MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
-	// MemberList lists all the members in the cluster.
-	MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
-}
-
-// UnimplementedClusterServer can be embedded to have forward compatible implementations.
-type UnimplementedClusterServer struct {
-}
-
-func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented")
-}
-func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented")
-}
-func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented")
-}
-func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented")
-}
-
-func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
-	s.RegisterService(&_Cluster_serviceDesc, srv)
-}
-
-func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MemberAddRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ClusterServer).MemberAdd(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Cluster/MemberAdd",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MemberRemoveRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ClusterServer).MemberRemove(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Cluster/MemberRemove",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MemberUpdateRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ClusterServer).MemberUpdate(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MemberListRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ClusterServer).MemberList(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Cluster/MemberList",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Cluster_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Cluster",
-	HandlerType: (*ClusterServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "MemberAdd",
-			Handler:    _Cluster_MemberAdd_Handler,
-		},
-		{
-			MethodName: "MemberRemove",
-			Handler:    _Cluster_MemberRemove_Handler,
-		},
-		{
-			MethodName: "MemberUpdate",
-			Handler:    _Cluster_MemberUpdate_Handler,
-		},
-		{
-			MethodName: "MemberList",
-			Handler:    _Cluster_MemberList_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "rpc.proto",
-}
-
-// MaintenanceClient is the client API for Maintenance service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MaintenanceClient interface {
-	// Alarm activates, deactivates, and queries alarms regarding cluster health.
-	Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
-	// Status gets the status of the member.
-	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
-	// Defragment defragments a member's backend database to recover storage space.
-	Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
-	// Hash computes the hash of the KV's backend.
-	// This is designed for testing; do not use this in production when there
-	// are ongoing transactions.
-	Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
-	// HashKV computes the hash of all MVCC keys up to a given revision.
-	HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
-	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
-	Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
-	// MoveLeader requests the current leader node to transfer its leadership to the transferee.
-	MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
-}
-
-type maintenanceClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
-	return &maintenanceClient{cc}
-}
-
-func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
-	out := new(AlarmResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
-	out := new(StatusResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
-	out := new(DefragmentResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
-	out := new(HashResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
-	out := new(HashKVResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &maintenanceSnapshotClient{stream}
-	if err := x.ClientStream.SendMsg(in); err != nil {
-		return nil, err
-	}
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	return x, nil
-}
-
-type Maintenance_SnapshotClient interface {
-	Recv() (*SnapshotResponse, error)
-	grpc.ClientStream
-}
-
-type maintenanceSnapshotClient struct {
-	grpc.ClientStream
-}
-
-func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
-	m := new(SnapshotResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
-	out := new(MoveLeaderResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// MaintenanceServer is the server API for Maintenance service.
-type MaintenanceServer interface {
-	// Alarm activates, deactivates, and queries alarms regarding cluster health.
-	Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
-	// Status gets the status of the member.
-	Status(context.Context, *StatusRequest) (*StatusResponse, error)
-	// Defragment defragments a member's backend database to recover storage space.
-	Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
-	// Hash computes the hash of the KV's backend.
-	// This is designed for testing; do not use this in production when there
-	// are ongoing transactions.
-	Hash(context.Context, *HashRequest) (*HashResponse, error)
-	// HashKV computes the hash of all MVCC keys up to a given revision.
-	HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
-	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
-	Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
-	// MoveLeader requests the current leader node to transfer its leadership to the transferee.
-	MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
-}
-
-// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations.
-type UnimplementedMaintenanceServer struct {
-}
-
-func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented")
-}
-func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
-}
-func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented")
-}
-func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented")
-}
-func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented")
-}
-func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error {
-	return status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
-}
-func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented")
-}
-
-func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
-	s.RegisterService(&_Maintenance_serviceDesc, srv)
-}
-
-func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AlarmRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).Alarm(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/Alarm",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(StatusRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).Status(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/Status",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DefragmentRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).Defragment(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/Defragment",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(HashRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).Hash(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/Hash",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(HashKVRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).HashKV(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/HashKV",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
-	m := new(SnapshotRequest)
-	if err := stream.RecvMsg(m); err != nil {
-		return err
-	}
-	return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
-}
-
-type Maintenance_SnapshotServer interface {
-	Send(*SnapshotResponse) error
-	grpc.ServerStream
-}
-
-type maintenanceSnapshotServer struct {
-	grpc.ServerStream
-}
-
-func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MoveLeaderRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).MoveLeader(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Maintenance_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Maintenance",
-	HandlerType: (*MaintenanceServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "Alarm",
-			Handler:    _Maintenance_Alarm_Handler,
-		},
-		{
-			MethodName: "Status",
-			Handler:    _Maintenance_Status_Handler,
-		},
-		{
-			MethodName: "Defragment",
-			Handler:    _Maintenance_Defragment_Handler,
-		},
-		{
-			MethodName: "Hash",
-			Handler:    _Maintenance_Hash_Handler,
-		},
-		{
-			MethodName: "HashKV",
-			Handler:    _Maintenance_HashKV_Handler,
-		},
-		{
-			MethodName: "MoveLeader",
-			Handler:    _Maintenance_MoveLeader_Handler,
-		},
-	},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "Snapshot",
-			Handler:       _Maintenance_Snapshot_Handler,
-			ServerStreams: true,
-		},
-	},
-	Metadata: "rpc.proto",
-}
-
-// AuthClient is the client API for Auth service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type AuthClient interface {
-	// AuthEnable enables authentication.
-	AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
-	// AuthDisable disables authentication.
-	AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
-	// Authenticate processes an authenticate request.
-	Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
-	// UserAdd adds a new user.
-	UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
-	// UserGet gets detailed user information.
-	UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
-	// UserList gets a list of all users.
-	UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
-	// UserDelete deletes a specified user.
-	UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
-	// UserChangePassword changes the password of a specified user.
-	UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
-	// UserGrantRole grants a role to a specified user.
-	UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
-	// UserRevokeRole revokes a role of a specified user.
-	UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
-	// RoleAdd adds a new role.
-	RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
-	// RoleGet gets detailed role information.
-	RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
-	// RoleList gets a list of all roles.
-	RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
-	// RoleDelete deletes a specified role.
-	RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
-	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
-	RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
-	// RoleRevokePermission revokes a key or range permission of a specified role.
-	RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
-}
-
-type authClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewAuthClient(cc *grpc.ClientConn) AuthClient {
-	return &authClient{cc}
-}
-
-func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
-	out := new(AuthEnableResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
-	out := new(AuthDisableResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
-	out := new(AuthenticateResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
-	out := new(AuthUserAddResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
-	out := new(AuthUserGetResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
-	out := new(AuthUserListResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
-	out := new(AuthUserDeleteResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
-	out := new(AuthUserChangePasswordResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
-	out := new(AuthUserGrantRoleResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
-	out := new(AuthUserRevokeRoleResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
-	out := new(AuthRoleAddResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
-	out := new(AuthRoleGetResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
-	out := new(AuthRoleListResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
-	out := new(AuthRoleDeleteResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
-	out := new(AuthRoleGrantPermissionResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
-	out := new(AuthRoleRevokePermissionResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// AuthServer is the server API for Auth service.
-type AuthServer interface {
-	// AuthEnable enables authentication.
-	AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
-	// AuthDisable disables authentication.
-	AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
-	// Authenticate processes an authenticate request.
-	Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
-	// UserAdd adds a new user.
-	UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
-	// UserGet gets detailed user information.
-	UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
-	// UserList gets a list of all users.
-	UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
-	// UserDelete deletes a specified user.
-	UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
-	// UserChangePassword changes the password of a specified user.
-	UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
-	// UserGrantRole grants a role to a specified user.
-	UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
-	// UserRevokeRole revokes a role of a specified user.
-	UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
-	// RoleAdd adds a new role.
-	RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
-	// RoleGet gets detailed role information.
-	RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
-	// RoleList gets a list of all roles.
-	RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
-	// RoleDelete deletes a specified role.
-	RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
-	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
-	RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
-	// RoleRevokePermission revokes a key or range permission of a specified role.
-	RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
-}
-
-// UnimplementedAuthServer can be embedded to have forward compatible implementations.
-type UnimplementedAuthServer struct {
-}
-
-func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented")
-}
-func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented")
-}
-func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented")
-}
-func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented")
-}
-func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented")
-}
-func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented")
-}
-func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented")
-}
-func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented")
-}
-func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented")
-}
-func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented")
-}
-func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented")
-}
-func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented")
-}
-func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented")
-}
-func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented")
-}
-func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented")
-}
-func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented")
-}
-
-func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
-	s.RegisterService(&_Auth_serviceDesc, srv)
-}
-
-func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthEnableRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).AuthEnable(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/AuthEnable",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthDisableRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).AuthDisable(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/AuthDisable",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthenticateRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).Authenticate(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/Authenticate",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserAddRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserAdd(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserAdd",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserGetRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserGet(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserGet",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserListRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserList(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserList",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserDeleteRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserDelete(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserDelete",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserChangePasswordRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserChangePassword(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserChangePassword",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserGrantRoleRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserGrantRole(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserGrantRole",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserRevokeRoleRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserRevokeRole(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleAddRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleAdd(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleAdd",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleGetRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleGet(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleGet",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleListRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleList(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleList",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleDeleteRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleDelete(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleDelete",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleGrantPermissionRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleGrantPermission(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleRevokePermissionRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleRevokePermission(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Auth_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Auth",
-	HandlerType: (*AuthServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "AuthEnable",
-			Handler:    _Auth_AuthEnable_Handler,
-		},
-		{
-			MethodName: "AuthDisable",
-			Handler:    _Auth_AuthDisable_Handler,
-		},
-		{
-			MethodName: "Authenticate",
-			Handler:    _Auth_Authenticate_Handler,
-		},
-		{
-			MethodName: "UserAdd",
-			Handler:    _Auth_UserAdd_Handler,
-		},
-		{
-			MethodName: "UserGet",
-			Handler:    _Auth_UserGet_Handler,
-		},
-		{
-			MethodName: "UserList",
-			Handler:    _Auth_UserList_Handler,
-		},
-		{
-			MethodName: "UserDelete",
-			Handler:    _Auth_UserDelete_Handler,
-		},
-		{
-			MethodName: "UserChangePassword",
-			Handler:    _Auth_UserChangePassword_Handler,
-		},
-		{
-			MethodName: "UserGrantRole",
-			Handler:    _Auth_UserGrantRole_Handler,
-		},
-		{
-			MethodName: "UserRevokeRole",
-			Handler:    _Auth_UserRevokeRole_Handler,
-		},
-		{
-			MethodName: "RoleAdd",
-			Handler:    _Auth_RoleAdd_Handler,
-		},
-		{
-			MethodName: "RoleGet",
-			Handler:    _Auth_RoleGet_Handler,
-		},
-		{
-			MethodName: "RoleList",
-			Handler:    _Auth_RoleList_Handler,
-		},
-		{
-			MethodName: "RoleDelete",
-			Handler:    _Auth_RoleDelete_Handler,
-		},
-		{
-			MethodName: "RoleGrantPermission",
-			Handler:    _Auth_RoleGrantPermission_Handler,
-		},
-		{
-			MethodName: "RoleRevokePermission",
-			Handler:    _Auth_RoleRevokePermission_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "rpc.proto",
-}
-
-func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.RaftTerm != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Revision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.MemberId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.ClusterId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.MaxCreateRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
-		i--
-		dAtA[i] = 0x68
-	}
-	if m.MinCreateRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
-		i--
-		dAtA[i] = 0x60
-	}
-	if m.MaxModRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
-		i--
-		dAtA[i] = 0x58
-	}
-	if m.MinModRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
-		i--
-		dAtA[i] = 0x50
-	}
-	if m.CountOnly {
-		i--
-		if m.CountOnly {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x48
-	}
-	if m.KeysOnly {
-		i--
-		if m.KeysOnly {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x40
-	}
-	if m.Serializable {
-		i--
-		if m.Serializable {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x38
-	}
-	if m.SortTarget != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
-		i--
-		dAtA[i] = 0x30
-	}
-	if m.SortOrder != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.Revision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Limit != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Count != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Count))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.More {
-		i--
-		if m.More {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Kvs) > 0 {
-		for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *PutRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.IgnoreLease {
-		i--
-		if m.IgnoreLease {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x30
-	}
-	if m.IgnoreValue {
-		i--
-		if m.IgnoreValue {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.PrevKv {
-		i--
-		if m.PrevKv {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Lease != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Value) > 0 {
-		i -= len(m.Value)
-		copy(dAtA[i:], m.Value)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *PutResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.PrevKv != nil {
-		{
-			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.PrevKv {
-		i--
-		if m.PrevKv {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.PrevKvs) > 0 {
-		for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if m.Deleted != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RequestOp) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Request != nil {
-		{
-			size := m.Request.Size()
-			i -= size
-			if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.RequestRange != nil {
-		{
-			size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.RequestPut != nil {
-		{
-			size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.RequestDeleteRange != nil {
-		{
-			size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.RequestTxn != nil {
-		{
-			size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x22
-	}
-	return len(dAtA) - i, nil
-}
-func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Response != nil {
-		{
-			size := m.Response.Size()
-			i -= size
-			if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ResponseRange != nil {
-		{
-			size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ResponsePut != nil {
-		{
-			size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ResponseDeleteRange != nil {
-		{
-			size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ResponseTxn != nil {
-		{
-			size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x22
-	}
-	return len(dAtA) - i, nil
-}
-func (m *Compare) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x4
-		i--
-		dAtA[i] = 0x82
-	}
-	if m.TargetUnion != nil {
-		{
-			size := m.TargetUnion.Size()
-			i -= size
-			if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.Target != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Target))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Result != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Result))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintRpc(dAtA, i, uint64(m.Version))
-	i--
-	dAtA[i] = 0x20
-	return len(dAtA) - i, nil
-}
-func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
-	i--
-	dAtA[i] = 0x28
-	return len(dAtA) - i, nil
-}
-func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
-	i--
-	dAtA[i] = 0x30
-	return len(dAtA) - i, nil
-}
-func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.Value != nil {
-		i -= len(m.Value)
-		copy(dAtA[i:], m.Value)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
-		i--
-		dAtA[i] = 0x3a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
-	i--
-	dAtA[i] = 0x40
-	return len(dAtA) - i, nil
-}
-func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Failure) > 0 {
-		for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if len(m.Success) > 0 {
-		for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if len(m.Compare) > 0 {
-		for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Responses) > 0 {
-		for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if m.Succeeded {
-		i--
-		if m.Succeeded {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Physical {
-		i--
-		if m.Physical {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Revision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *HashRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Revision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.CompactRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.Hash != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *HashResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Hash != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Blob) > 0 {
-		i -= len(m.Blob)
-		copy(dAtA[i:], m.Blob)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.RemainingBytes != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.RequestUnion != nil {
-		{
-			size := m.RequestUnion.Size()
-			i -= size
-			if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.CreateRequest != nil {
-		{
-			size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.CancelRequest != nil {
-		{
-			size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	return len(dAtA) - i, nil
-}
-func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ProgressRequest != nil {
-		{
-			size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Fragment {
-		i--
-		if m.Fragment {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x40
-	}
-	if m.WatchId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
-		i--
-		dAtA[i] = 0x38
-	}
-	if m.PrevKv {
-		i--
-		if m.PrevKv {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x30
-	}
-	if len(m.Filters) > 0 {
-		dAtA22 := make([]byte, len(m.Filters)*10)
-		var j21 int
-		for _, num := range m.Filters {
-			for num >= 1<<7 {
-				dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
-				num >>= 7
-				j21++
-			}
-			dAtA22[j21] = uint8(num)
-			j21++
-		}
-		i -= j21
-		copy(dAtA[i:], dAtA22[:j21])
-		i = encodeVarintRpc(dAtA, i, uint64(j21))
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.ProgressNotify {
-		i--
-		if m.ProgressNotify {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.StartRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.WatchId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Events) > 0 {
-		for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x5a
-		}
-	}
-	if m.Fragment {
-		i--
-		if m.Fragment {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x38
-	}
-	if len(m.CancelReason) > 0 {
-		i -= len(m.CancelReason)
-		copy(dAtA[i:], m.CancelReason)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
-		i--
-		dAtA[i] = 0x32
-	}
-	if m.CompactRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.Canceled {
-		i--
-		if m.Canceled {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Created {
-		i--
-		if m.Created {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.WatchId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.TTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Error) > 0 {
-		i -= len(m.Error)
-		copy(dAtA[i:], m.Error)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
-		i--
-		dAtA[i] = 0x22
-	}
-	if m.TTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.TTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Keys {
-		i--
-		if m.Keys {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Keys) > 0 {
-		for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Keys[iNdEx])
-			copy(dAtA[i:], m.Keys[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx])))
-			i--
-			dAtA[i] = 0x2a
-		}
-	}
-	if m.GrantedTTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.TTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Leases) > 0 {
-		for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Member) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Member) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.ClientURLs) > 0 {
-		for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.ClientURLs[iNdEx])
-			copy(dAtA[i:], m.ClientURLs[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx])))
-			i--
-			dAtA[i] = 0x22
-		}
-	}
-	if len(m.PeerURLs) > 0 {
-		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.PeerURLs[iNdEx])
-			copy(dAtA[i:], m.PeerURLs[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.PeerURLs) > 0 {
-		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.PeerURLs[iNdEx])
-			copy(dAtA[i:], m.PeerURLs[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Members) > 0 {
-		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if m.Member != nil {
-		{
-			size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Members) > 0 {
-		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.PeerURLs) > 0 {
-		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.PeerURLs[iNdEx])
-			copy(dAtA[i:], m.PeerURLs[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Members) > 0 {
-		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Members) > 0 {
-		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.TargetID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Alarm != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.MemberID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Action != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Action))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Alarm != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.MemberID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Alarms) > 0 {
-		for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.RaftTerm != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
-		i--
-		dAtA[i] = 0x30
-	}
-	if m.RaftIndex != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.Leader != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.DbSize != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Version) > 0 {
-		i -= len(m.Version)
-		copy(dAtA[i:], m.Version)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.User) > 0 {
-		i -= len(m.User)
-		copy(dAtA[i:], m.User)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Perm != nil {
-		{
-			size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Token) > 0 {
-		i -= len(m.Token)
-		copy(dAtA[i:], m.Token)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Roles) > 0 {
-		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Roles[iNdEx])
-			copy(dAtA[i:], m.Roles[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Perm) > 0 {
-		for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Roles) > 0 {
-		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Roles[iNdEx])
-			copy(dAtA[i:], m.Roles[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Users) > 0 {
-		for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Users[iNdEx])
-			copy(dAtA[i:], m.Users[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
-	offset -= sovRpc(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *ResponseHeader) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ClusterId != 0 {
-		n += 1 + sovRpc(uint64(m.ClusterId))
-	}
-	if m.MemberId != 0 {
-		n += 1 + sovRpc(uint64(m.MemberId))
-	}
-	if m.Revision != 0 {
-		n += 1 + sovRpc(uint64(m.Revision))
-	}
-	if m.RaftTerm != 0 {
-		n += 1 + sovRpc(uint64(m.RaftTerm))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *RangeRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Limit != 0 {
-		n += 1 + sovRpc(uint64(m.Limit))
-	}
-	if m.Revision != 0 {
-		n += 1 + sovRpc(uint64(m.Revision))
-	}
-	if m.SortOrder != 0 {
-		n += 1 + sovRpc(uint64(m.SortOrder))
-	}
-	if m.SortTarget != 0 {
-		n += 1 + sovRpc(uint64(m.SortTarget))
-	}
-	if m.Serializable {
-		n += 2
-	}
-	if m.KeysOnly {
-		n += 2
-	}
-	if m.CountOnly {
-		n += 2
-	}
-	if m.MinModRevision != 0 {
-		n += 1 + sovRpc(uint64(m.MinModRevision))
-	}
-	if m.MaxModRevision != 0 {
-		n += 1 + sovRpc(uint64(m.MaxModRevision))
-	}
-	if m.MinCreateRevision != 0 {
-		n += 1 + sovRpc(uint64(m.MinCreateRevision))
-	}
-	if m.MaxCreateRevision != 0 {
-		n += 1 + sovRpc(uint64(m.MaxCreateRevision))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *RangeResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Kvs) > 0 {
-		for _, e := range m.Kvs {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.More {
-		n += 2
-	}
-	if m.Count != 0 {
-		n += 1 + sovRpc(uint64(m.Count))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *PutRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Value)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Lease != 0 {
-		n += 1 + sovRpc(uint64(m.Lease))
-	}
-	if m.PrevKv {
-		n += 2
-	}
-	if m.IgnoreValue {
-		n += 2
-	}
-	if m.IgnoreLease {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *PutResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.PrevKv != nil {
-		l = m.PrevKv.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *DeleteRangeRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.PrevKv {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *DeleteRangeResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Deleted != 0 {
-		n += 1 + sovRpc(uint64(m.Deleted))
-	}
-	if len(m.PrevKvs) > 0 {
-		for _, e := range m.PrevKvs {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *RequestOp) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Request != nil {
-		n += m.Request.Size()
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *RequestOp_RequestRange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestRange != nil {
-		l = m.RequestRange.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *RequestOp_RequestPut) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestPut != nil {
-		l = m.RequestPut.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *RequestOp_RequestDeleteRange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestDeleteRange != nil {
-		l = m.RequestDeleteRange.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *RequestOp_RequestTxn) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestTxn != nil {
-		l = m.RequestTxn.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *ResponseOp) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Response != nil {
-		n += m.Response.Size()
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *ResponseOp_ResponseRange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ResponseRange != nil {
-		l = m.ResponseRange.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *ResponseOp_ResponsePut) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ResponsePut != nil {
-		l = m.ResponsePut.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ResponseDeleteRange != nil {
-		l = m.ResponseDeleteRange.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *ResponseOp_ResponseTxn) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ResponseTxn != nil {
-		l = m.ResponseTxn.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *Compare) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Result != 0 {
-		n += 1 + sovRpc(uint64(m.Result))
-	}
-	if m.Target != 0 {
-		n += 1 + sovRpc(uint64(m.Target))
-	}
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.TargetUnion != nil {
-		n += m.TargetUnion.Size()
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 2 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Compare_Version) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRpc(uint64(m.Version))
-	return n
-}
-func (m *Compare_CreateRevision) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRpc(uint64(m.CreateRevision))
-	return n
-}
-func (m *Compare_ModRevision) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRpc(uint64(m.ModRevision))
-	return n
-}
-func (m *Compare_Value) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Value != nil {
-		l = len(m.Value)
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *Compare_Lease) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRpc(uint64(m.Lease))
-	return n
-}
-func (m *TxnRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Compare) > 0 {
-		for _, e := range m.Compare {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if len(m.Success) > 0 {
-		for _, e := range m.Success {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if len(m.Failure) > 0 {
-		for _, e := range m.Failure {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *TxnResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Succeeded {
-		n += 2
-	}
-	if len(m.Responses) > 0 {
-		for _, e := range m.Responses {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *CompactionRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Revision != 0 {
-		n += 1 + sovRpc(uint64(m.Revision))
-	}
-	if m.Physical {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *CompactionResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HashRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HashKVRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Revision != 0 {
-		n += 1 + sovRpc(uint64(m.Revision))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HashKVResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Hash != 0 {
-		n += 1 + sovRpc(uint64(m.Hash))
-	}
-	if m.CompactRevision != 0 {
-		n += 1 + sovRpc(uint64(m.CompactRevision))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HashResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Hash != 0 {
-		n += 1 + sovRpc(uint64(m.Hash))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *SnapshotRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *SnapshotResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.RemainingBytes != 0 {
-		n += 1 + sovRpc(uint64(m.RemainingBytes))
-	}
-	l = len(m.Blob)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestUnion != nil {
-		n += m.RequestUnion.Size()
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchRequest_CreateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.CreateRequest != nil {
-		l = m.CreateRequest.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *WatchRequest_CancelRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.CancelRequest != nil {
-		l = m.CancelRequest.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *WatchRequest_ProgressRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ProgressRequest != nil {
-		l = m.ProgressRequest.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *WatchCreateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.StartRevision != 0 {
-		n += 1 + sovRpc(uint64(m.StartRevision))
-	}
-	if m.ProgressNotify {
-		n += 2
-	}
-	if len(m.Filters) > 0 {
-		l = 0
-		for _, e := range m.Filters {
-			l += sovRpc(uint64(e))
-		}
-		n += 1 + sovRpc(uint64(l)) + l
-	}
-	if m.PrevKv {
-		n += 2
-	}
-	if m.WatchId != 0 {
-		n += 1 + sovRpc(uint64(m.WatchId))
-	}
-	if m.Fragment {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchCancelRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.WatchId != 0 {
-		n += 1 + sovRpc(uint64(m.WatchId))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchProgressRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.WatchId != 0 {
-		n += 1 + sovRpc(uint64(m.WatchId))
-	}
-	if m.Created {
-		n += 2
-	}
-	if m.Canceled {
-		n += 2
-	}
-	if m.CompactRevision != 0 {
-		n += 1 + sovRpc(uint64(m.CompactRevision))
-	}
-	l = len(m.CancelReason)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Fragment {
-		n += 2
-	}
-	if len(m.Events) > 0 {
-		for _, e := range m.Events {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseGrantRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.TTL != 0 {
-		n += 1 + sovRpc(uint64(m.TTL))
-	}
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseGrantResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.TTL != 0 {
-		n += 1 + sovRpc(uint64(m.TTL))
-	}
-	l = len(m.Error)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseRevokeRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseRevokeResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseKeepAliveRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseKeepAliveResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.TTL != 0 {
-		n += 1 + sovRpc(uint64(m.TTL))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseTimeToLiveRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.Keys {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseTimeToLiveResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.TTL != 0 {
-		n += 1 + sovRpc(uint64(m.TTL))
-	}
-	if m.GrantedTTL != 0 {
-		n += 1 + sovRpc(uint64(m.GrantedTTL))
-	}
-	if len(m.Keys) > 0 {
-		for _, b := range m.Keys {
-			l = len(b)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseLeasesRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseStatus) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseLeasesResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Leases) > 0 {
-		for _, e := range m.Leases {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Member) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.PeerURLs) > 0 {
-		for _, s := range m.PeerURLs {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if len(m.ClientURLs) > 0 {
-		for _, s := range m.ClientURLs {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberAddRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.PeerURLs) > 0 {
-		for _, s := range m.PeerURLs {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberAddResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Member != nil {
-		l = m.Member.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Members) > 0 {
-		for _, e := range m.Members {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberRemoveRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberRemoveResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Members) > 0 {
-		for _, e := range m.Members {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberUpdateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if len(m.PeerURLs) > 0 {
-		for _, s := range m.PeerURLs {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberUpdateResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Members) > 0 {
-		for _, e := range m.Members {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberListRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberListResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Members) > 0 {
-		for _, e := range m.Members {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *DefragmentRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *DefragmentResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MoveLeaderRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.TargetID != 0 {
-		n += 1 + sovRpc(uint64(m.TargetID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MoveLeaderResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AlarmRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Action != 0 {
-		n += 1 + sovRpc(uint64(m.Action))
-	}
-	if m.MemberID != 0 {
-		n += 1 + sovRpc(uint64(m.MemberID))
-	}
-	if m.Alarm != 0 {
-		n += 1 + sovRpc(uint64(m.Alarm))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AlarmMember) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.MemberID != 0 {
-		n += 1 + sovRpc(uint64(m.MemberID))
-	}
-	if m.Alarm != 0 {
-		n += 1 + sovRpc(uint64(m.Alarm))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AlarmResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Alarms) > 0 {
-		for _, e := range m.Alarms {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *StatusRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *StatusResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Version)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.DbSize != 0 {
-		n += 1 + sovRpc(uint64(m.DbSize))
-	}
-	if m.Leader != 0 {
-		n += 1 + sovRpc(uint64(m.Leader))
-	}
-	if m.RaftIndex != 0 {
-		n += 1 + sovRpc(uint64(m.RaftIndex))
-	}
-	if m.RaftTerm != 0 {
-		n += 1 + sovRpc(uint64(m.RaftTerm))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthEnableRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthDisableRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthenticateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserAddRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserGetRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserDeleteRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserChangePasswordRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserGrantRoleRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.User)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserRevokeRoleRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleAddRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleGetRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserListRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleListRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleDeleteRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Perm != nil {
-		l = m.Perm.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthEnableResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthDisableResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthenticateResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Token)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserAddResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserGetResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Roles) > 0 {
-		for _, s := range m.Roles {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserDeleteResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserChangePasswordResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserGrantRoleResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserRevokeRoleResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleAddResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleGetResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Perm) > 0 {
-		for _, e := range m.Perm {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleListResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Roles) > 0 {
-		for _, s := range m.Roles {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserListResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Users) > 0 {
-		for _, s := range m.Users {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleDeleteResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovRpc(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRpc(x uint64) (n int) {
-	return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
-			}
-			m.ClusterId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ClusterId |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
-			}
-			m.MemberId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MemberId |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
-			}
-			m.Revision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Revision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
-			}
-			m.RaftTerm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RaftTerm |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *RangeRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
-			}
-			m.Limit = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Limit |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
-			}
-			m.Revision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Revision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
-			}
-			m.SortOrder = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
-			}
-			m.SortTarget = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 7:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Serializable = bool(v != 0)
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.KeysOnly = bool(v != 0)
-		case 9:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.CountOnly = bool(v != 0)
-		case 10:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
-			}
-			m.MinModRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MinModRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 11:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
-			}
-			m.MaxModRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MaxModRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 12:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
-			}
-			m.MinCreateRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MinCreateRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 13:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
-			}
-			m.MaxCreateRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MaxCreateRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *RangeResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
-			if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.More = bool(v != 0)
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
-			}
-			m.Count = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Count |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *PutRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
-			if m.Value == nil {
-				m.Value = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
-			}
-			m.Lease = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Lease |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.PrevKv = bool(v != 0)
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.IgnoreValue = bool(v != 0)
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.IgnoreLease = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *PutResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.PrevKv == nil {
-				m.PrevKv = &mvccpb.KeyValue{}
-			}
-			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.PrevKv = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
-			}
-			m.Deleted = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Deleted |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
-			if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *RequestOp) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &RangeRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Request = &RequestOp_RequestRange{v}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &PutRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Request = &RequestOp_RequestPut{v}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &DeleteRangeRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Request = &RequestOp_RequestDeleteRange{v}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &TxnRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Request = &RequestOp_RequestTxn{v}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ResponseOp) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &RangeResponse{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Response = &ResponseOp_ResponseRange{v}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &PutResponse{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Response = &ResponseOp_ResponsePut{v}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &DeleteRangeResponse{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Response = &ResponseOp_ResponseDeleteRange{v}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &TxnResponse{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Response = &ResponseOp_ResponseTxn{v}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Compare) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Compare: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
-			}
-			m.Result = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Result |= Compare_CompareResult(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
-			}
-			m.Target = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Target |= Compare_CompareTarget(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
-			}
-			var v int64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TargetUnion = &Compare_Version{v}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
-			}
-			var v int64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TargetUnion = &Compare_CreateRevision{v}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
-			}
-			var v int64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TargetUnion = &Compare_ModRevision{v}
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := make([]byte, postIndex-iNdEx)
-			copy(v, dAtA[iNdEx:postIndex])
-			m.TargetUnion = &Compare_Value{v}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
-			}
-			var v int64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TargetUnion = &Compare_Lease{v}
-		case 64:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *TxnRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Compare = append(m.Compare, &Compare{})
-			if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Success = append(m.Success, &RequestOp{})
-			if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Failure = append(m.Failure, &RequestOp{})
-			if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *TxnResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Succeeded = bool(v != 0)
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Responses = append(m.Responses, &ResponseOp{})
-			if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
-			}
-			m.Revision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Revision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Physical = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HashRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
-			}
-			m.Revision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Revision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
-			}
-			m.Hash = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Hash |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
-			}
-			m.CompactRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.CompactRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HashResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
-			}
-			m.Hash = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Hash |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
-			}
-			m.RemainingBytes = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RemainingBytes |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
-			if m.Blob == nil {
-				m.Blob = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &WatchCreateRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.RequestUnion = &WatchRequest_CreateRequest{v}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &WatchCancelRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.RequestUnion = &WatchRequest_CancelRequest{v}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &WatchProgressRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.RequestUnion = &WatchRequest_ProgressRequest{v}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
-			}
-			m.StartRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.StartRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.ProgressNotify = bool(v != 0)
-		case 5:
-			if wireType == 0 {
-				var v WatchCreateRequest_FilterType
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRpc
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= WatchCreateRequest_FilterType(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.Filters = append(m.Filters, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRpc
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthRpc
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthRpc
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				if elementCount != 0 && len(m.Filters) == 0 {
-					m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v WatchCreateRequest_FilterType
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowRpc
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= WatchCreateRequest_FilterType(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.Filters = append(m.Filters, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
-			}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.PrevKv = bool(v != 0)
-		case 7:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
-			}
-			m.WatchId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.WatchId |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Fragment = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
-			}
-			m.WatchId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.WatchId |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
-			}
-			m.WatchId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.WatchId |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Created = bool(v != 0)
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Canceled = bool(v != 0)
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
-			}
-			m.CompactRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.CompactRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.CancelReason = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 7:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Fragment = bool(v != 0)
-		case 11:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Events = append(m.Events, &mvccpb.Event{})
-			if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
-			}
-			m.TTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
-			}
-			m.TTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Error = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
-			}
-			m.TTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Keys = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
-			}
-			m.TTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
-			}
-			m.GrantedTTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.GrantedTTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
-			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Leases = append(m.Leases, &LeaseStatus{})
-			if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Member) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Member: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Member == nil {
-				m.Member = &Member{}
-			}
-			if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Members = append(m.Members, &Member{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Members = append(m.Members, &Member{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Members = append(m.Members, &Member{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Members = append(m.Members, &Member{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
-			}
-			m.TargetID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TargetID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
-			}
-			m.Action = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
-			}
-			m.MemberID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MemberID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
-			}
-			m.Alarm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Alarm |= AlarmType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AlarmMember) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
-			}
-			m.MemberID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MemberID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
-			}
-			m.Alarm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Alarm |= AlarmType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Alarms = append(m.Alarms, &AlarmMember{})
-			if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *StatusRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *StatusResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Version = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
-			}
-			m.DbSize = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.DbSize |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
-			}
-			m.Leader = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Leader |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
-			}
-			m.RaftIndex = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RaftIndex |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
-			}
-			m.RaftTerm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RaftTerm |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.User = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Perm == nil {
-				m.Perm = &authpb.Permission{}
-			}
-			if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Token = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Perm = append(m.Perm, &authpb.Permission{})
-			if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipRpc(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthRpc
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthRpc
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowRpc
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipRpc(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthRpc
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowRpc   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
deleted file mode 100644
index d9da43c..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
+++ /dev/null
@@ -1,1075 +0,0 @@
-syntax = "proto3";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-import "etcd/mvcc/mvccpb/kv.proto";
-import "etcd/auth/authpb/auth.proto";
-
-// for grpc-gateway
-import "google/api/annotations.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-service KV {
-  // Range gets the keys in the range from the key-value store.
-  rpc Range(RangeRequest) returns (RangeResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/range"
-        body: "*"
-    };
-  }
-
-  // Put puts the given key into the key-value store.
-  // A put request increments the revision of the key-value store
-  // and generates one event in the event history.
-  rpc Put(PutRequest) returns (PutResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/put"
-        body: "*"
-    };
-  }
-
-  // DeleteRange deletes the given range from the key-value store.
-  // A delete request increments the revision of the key-value store
-  // and generates a delete event in the event history for every deleted key.
-  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/deleterange"
-        body: "*"
-    };
-  }
-
-  // Txn processes multiple requests in a single transaction.
-  // A txn request increments the revision of the key-value store
-  // and generates events with the same revision for every completed request.
-  // It is not allowed to modify the same key several times within one txn.
-  rpc Txn(TxnRequest) returns (TxnResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/txn"
-        body: "*"
-    };
-  }
-
-  // Compact compacts the event history in the etcd key-value store. The key-value
-  // store should be periodically compacted or the event history will continue to grow
-  // indefinitely.
-  rpc Compact(CompactionRequest) returns (CompactionResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/compaction"
-        body: "*"
-    };
-  }
-}
-
-service Watch {
-  // Watch watches for events happening or that have happened. Both input and output
-  // are streams; the input stream is for creating and canceling watchers and the output
-  // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
-  // for several watches at once. The entire event history can be watched starting from the
-  // last compaction revision.
-  rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/watch"
-        body: "*"
-    };
-  }
-}
-
-service Lease {
-  // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
-  // within a given time to live period. All keys attached to the lease will be expired and
-  // deleted if the lease expires. Each expired key generates a delete event in the event history.
-  rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/lease/grant"
-        body: "*"
-    };
-  }
-
-  // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
-  rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/lease/revoke"
-        body: "*"
-    };
-  }
-
-  // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
-  // to the server and streaming keep alive responses from the server to the client.
-  rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/lease/keepalive"
-        body: "*"
-    };
-  }
-
-  // LeaseTimeToLive retrieves lease information.
-  rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/lease/timetolive"
-        body: "*"
-    };
-  }
-
-  // LeaseLeases lists all existing leases.
-  rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/lease/leases"
-        body: "*"
-    };
-  }
-}
-
-service Cluster {
-  // MemberAdd adds a member into the cluster.
-  rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/cluster/member/add"
-        body: "*"
-    };
-  }
-
-  // MemberRemove removes an existing member from the cluster.
-  rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/cluster/member/remove"
-        body: "*"
-    };
-  }
-
-  // MemberUpdate updates the member configuration.
-  rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/cluster/member/update"
-        body: "*"
-    };
-  }
-
-  // MemberList lists all the members in the cluster.
-  rpc MemberList(MemberListRequest) returns (MemberListResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/cluster/member/list"
-        body: "*"
-    };
-  }
-}
-
-service Maintenance {
-  // Alarm activates, deactivates, and queries alarms regarding cluster health.
-  rpc Alarm(AlarmRequest) returns (AlarmResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/alarm"
-        body: "*"
-    };
-  }
-
-  // Status gets the status of the member.
-  rpc Status(StatusRequest) returns (StatusResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/status"
-        body: "*"
-    };
-  }
-
-  // Defragment defragments a member's backend database to recover storage space.
-  rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/defragment"
-        body: "*"
-    };
-  }
-
-  // Hash computes the hash of the KV's backend.
-  // This is designed for testing; do not use this in production when there
-  // are ongoing transactions.
-  rpc Hash(HashRequest) returns (HashResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/hash"
-        body: "*"
-    };
-  }
-
-  // HashKV computes the hash of all MVCC keys up to a given revision.
-  rpc HashKV(HashKVRequest) returns (HashKVResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/hash"
-        body: "*"
-    };
-  }
-
-  // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
-  rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/snapshot"
-        body: "*"
-    };
-  }
-
-  // MoveLeader requests current leader node to transfer its leadership to transferee.
-  rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/transfer-leadership"
-        body: "*"
-    };
-  }
-}
-
-service Auth {
-  // AuthEnable enables authentication.
-  rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/enable"
-        body: "*"
-    };
-  }
-
-  // AuthDisable disables authentication.
-  rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/disable"
-        body: "*"
-    };
-  }
-
-  // Authenticate processes an authenticate request.
-  rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/authenticate"
-        body: "*"
-    };
-  }
-
-  // UserAdd adds a new user.
-  rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/add"
-        body: "*"
-    };
-  }
-
-  // UserGet gets detailed user information.
-  rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/get"
-        body: "*"
-    };
-  }
-
-  // UserList gets a list of all users.
-  rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/list"
-        body: "*"
-    };
-  }
-
-  // UserDelete deletes a specified user.
-  rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/delete"
-        body: "*"
-    };
-  }
-
-  // UserChangePassword changes the password of a specified user.
-  rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/changepw"
-        body: "*"
-    };
-  }
-
-  // UserGrant grants a role to a specified user.
-  rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/grant"
-        body: "*"
-    };
-  }
-
-  // UserRevokeRole revokes a role of specified user.
-  rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/revoke"
-        body: "*"
-    };
-  }
-
-  // RoleAdd adds a new role.
-  rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/add"
-        body: "*"
-    };
-  }
-
-  // RoleGet gets detailed role information.
-  rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/get"
-        body: "*"
-    };
-  }
-
-  // RoleList gets lists of all roles.
-  rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/list"
-        body: "*"
-    };
-  }
-
-  // RoleDelete deletes a specified role.
-  rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/delete"
-        body: "*"
-    };
-  }
-
-  // RoleGrantPermission grants a permission of a specified key or range to a specified role.
-  rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/grant"
-        body: "*"
-    };
-  }
-
-  // RoleRevokePermission revokes a key or range permission of a specified role.
-  rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/revoke"
-        body: "*"
-    };
-  }
-}
-
-message ResponseHeader {
-  // cluster_id is the ID of the cluster which sent the response.
-  uint64 cluster_id = 1;
-  // member_id is the ID of the member which sent the response.
-  uint64 member_id = 2;
-  // revision is the key-value store revision when the request was applied.
-  // For watch progress responses, the header.revision indicates progress. All future events
-  // received in this stream are guaranteed to have a higher revision number than the
-  // header.revision number.
-  int64 revision = 3;
-  // raft_term is the raft term when the request was applied.
-  uint64 raft_term = 4;
-}
-
-message RangeRequest {
-  enum SortOrder {
-	NONE = 0; // default, no sorting
-	ASCEND = 1; // lowest target value first
-	DESCEND = 2; // highest target value first
-  }
-  enum SortTarget {
-	KEY = 0;
-	VERSION = 1;
-	CREATE = 2;
-	MOD = 3;
-	VALUE = 4;
-  }
-
-  // key is the first key for the range. If range_end is not given, the request only looks up key.
-  bytes key = 1;
-  // range_end is the upper bound on the requested range [key, range_end).
-  // If range_end is '\0', the range is all keys >= key.
-  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
-  // then the range request gets all keys prefixed with key.
-  // If both key and range_end are '\0', then the range request returns all keys.
-  bytes range_end = 2;
-  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
-  // it is treated as no limit.
-  int64 limit = 3;
-  // revision is the point-in-time of the key-value store to use for the range.
-  // If revision is less or equal to zero, the range is over the newest key-value store.
-  // If the revision has been compacted, ErrCompacted is returned as a response.
-  int64 revision = 4;
-
-  // sort_order is the order for returned sorted results.
-  SortOrder sort_order = 5;
-
-  // sort_target is the key-value field to use for sorting.
-  SortTarget sort_target = 6;
-
-  // serializable sets the range request to use serializable member-local reads.
-  // Range requests are linearizable by default; linearizable requests have higher
-  // latency and lower throughput than serializable requests but reflect the current
-  // consensus of the cluster. For better performance, in exchange for possible stale reads,
-  // a serializable range request is served locally without needing to reach consensus
-  // with other nodes in the cluster.
-  bool serializable = 7;
-
-  // keys_only when set returns only the keys and not the values.
-  bool keys_only = 8;
-
-  // count_only when set returns only the count of the keys in the range.
-  bool count_only = 9;
-
-  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
-  // lesser mod revisions will be filtered away.
-  int64 min_mod_revision = 10;
-
-  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
-  // greater mod revisions will be filtered away.
-  int64 max_mod_revision = 11;
-
-  // min_create_revision is the lower bound for returned key create revisions; all keys with
-  // lesser create revisions will be filtered away.
-  int64 min_create_revision = 12;
-
-  // max_create_revision is the upper bound for returned key create revisions; all keys with
-  // greater create revisions will be filtered away.
-  int64 max_create_revision = 13;
-}
-
-message RangeResponse {
-  ResponseHeader header = 1;
-  // kvs is the list of key-value pairs matched by the range request.
-  // kvs is empty when count is requested.
-  repeated mvccpb.KeyValue kvs = 2;
-  // more indicates if there are more keys to return in the requested range.
-  bool more = 3;
-  // count is set to the number of keys within the range when requested.
-  int64 count = 4;
-}
-
-message PutRequest {
-  // key is the key, in bytes, to put into the key-value store.
-  bytes key = 1;
-  // value is the value, in bytes, to associate with the key in the key-value store.
-  bytes value = 2;
-  // lease is the lease ID to associate with the key in the key-value store. A lease
-  // value of 0 indicates no lease.
-  int64 lease = 3;
-
-  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
-  // The previous key-value pair will be returned in the put response.
-  bool prev_kv = 4;
-
-  // If ignore_value is set, etcd updates the key using its current value.
-  // Returns an error if the key does not exist.
-  bool ignore_value = 5;
-
-  // If ignore_lease is set, etcd updates the key using its current lease.
-  // Returns an error if the key does not exist.
-  bool ignore_lease = 6;
-}
-
-message PutResponse {
-  ResponseHeader header = 1;
-  // if prev_kv is set in the request, the previous key-value pair will be returned.
-  mvccpb.KeyValue prev_kv = 2;
-}
-
-message DeleteRangeRequest {
-  // key is the first key to delete in the range.
-  bytes key = 1;
-  // range_end is the key following the last key to delete for the range [key, range_end).
-  // If range_end is not given, the range is defined to contain only the key argument.
-  // If range_end is one bit larger than the given key, then the range is all the keys
-  // with the prefix (the given key).
-  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
-  bytes range_end = 2;
-
-  // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
-  // The previous key-value pairs will be returned in the delete response.
-  bool prev_kv = 3;
-}
-
-message DeleteRangeResponse {
-  ResponseHeader header = 1;
-  // deleted is the number of keys deleted by the delete range request.
-  int64 deleted = 2;
-  // if prev_kv is set in the request, the previous key-value pairs will be returned.
-  repeated mvccpb.KeyValue prev_kvs = 3;
-}
-
-message RequestOp {
-  // request is a union of request types accepted by a transaction.
-  oneof request {
-    RangeRequest request_range = 1;
-    PutRequest request_put = 2;
-    DeleteRangeRequest request_delete_range = 3;
-    TxnRequest request_txn = 4;
-  }
-}
-
-message ResponseOp {
-  // response is a union of response types returned by a transaction.
-  oneof response {
-    RangeResponse response_range = 1;
-    PutResponse response_put = 2;
-    DeleteRangeResponse response_delete_range = 3;
-    TxnResponse response_txn = 4;
-  }
-}
-
-message Compare {
-  enum CompareResult {
-    EQUAL = 0;
-    GREATER = 1;
-    LESS = 2;
-    NOT_EQUAL = 3;
-  }
-  enum CompareTarget {
-    VERSION = 0;
-    CREATE = 1;
-    MOD = 2;
-    VALUE= 3;
-    LEASE = 4;
-  }
-  // result is logical comparison operation for this comparison.
-  CompareResult result = 1;
-  // target is the key-value field to inspect for the comparison.
-  CompareTarget target = 2;
-  // key is the subject key for the comparison operation.
-  bytes key = 3;
-  oneof target_union {
-    // version is the version of the given key
-    int64 version = 4;
-    // create_revision is the creation revision of the given key
-    int64 create_revision = 5;
-    // mod_revision is the last modified revision of the given key.
-    int64 mod_revision = 6;
-    // value is the value of the given key, in bytes.
-    bytes value = 7;
-    // lease is the lease id of the given key.
-    int64 lease = 8;
-    // leave room for more target_union field tags, jump to 64
-  }
-
-  // range_end compares the given target to all keys in the range [key, range_end).
-  // See RangeRequest for more details on key ranges.
-  bytes range_end = 64;
-  // TODO: fill out with most of the rest of RangeRequest fields when needed.
-}
-
-// From google paxosdb paper:
-// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
-// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
-// and consists of three components:
-// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
-// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
-// may apply to the same or different entries in the database. All tests in the guard are applied and
-// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
-// it executes f op (see item 3 below).
-// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
-// lookup operation, and applies to a single database entry. Two different operations in the list may apply
-// to the same or different entries in the database. These operations are executed if guard
-// evaluates to true.
-// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
-message TxnRequest {
-  // compare is a list of predicates representing a conjunction of terms.
-  // If the comparisons succeed, then the success requests will be processed in order,
-  // and the response will contain their respective responses in order.
-  // If the comparisons fail, then the failure requests will be processed in order,
-  // and the response will contain their respective responses in order.
-  repeated Compare compare = 1;
-  // success is a list of requests which will be applied when compare evaluates to true.
-  repeated RequestOp success = 2;
-  // failure is a list of requests which will be applied when compare evaluates to false.
-  repeated RequestOp failure = 3;
-}
-
-message TxnResponse {
-  ResponseHeader header = 1;
-  // succeeded is set to true if the compare evaluated to true, or false otherwise.
-  bool succeeded = 2;
-  // responses is a list of responses corresponding to the results from applying
-  // success if succeeded is true or failure if succeeded is false.
-  repeated ResponseOp responses = 3;
-}
-
-// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
-// with a revision less than the compaction revision will be removed.
-message CompactionRequest {
-  // revision is the key-value store revision for the compaction operation.
-  int64 revision = 1;
-  // physical is set so the RPC will wait until the compaction is physically
-  // applied to the local database such that compacted entries are totally
-  // removed from the backend database.
-  bool physical = 2;
-}
-
-message CompactionResponse {
-  ResponseHeader header = 1;
-}
-
-message HashRequest {
-}
-
-message HashKVRequest {
-  // revision is the key-value store revision for the hash operation.
-  int64 revision = 1;
-}
-
-message HashKVResponse {
-  ResponseHeader header = 1;
-  // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
-  uint32 hash = 2;
-  // compact_revision is the compacted revision of key-value store when hash begins.
-  int64 compact_revision = 3;
-}
-
-message HashResponse {
-  ResponseHeader header = 1;
-  // hash is the hash value computed from the responding member's key-value store backend.
-  uint32 hash = 2;
-}
-
-message SnapshotRequest {
-}
-
-message SnapshotResponse {
-  // header has the current key-value store information. The first header in the snapshot
-  // stream indicates the point in time of the snapshot.
-  ResponseHeader header = 1;
-
-  // remaining_bytes is the number of blob bytes to be sent after this message
-  uint64 remaining_bytes = 2;
-
-  // blob contains the next chunk of the snapshot in the snapshot stream.
-  bytes blob = 3;
-}
-
-message WatchRequest {
-  // request_union is a request to either create a new watcher or cancel an existing watcher.
-  oneof request_union {
-    WatchCreateRequest create_request = 1;
-    WatchCancelRequest cancel_request = 2;
-    WatchProgressRequest progress_request = 3;
-  }
-}
-
-message WatchCreateRequest {
-  // key is the key to register for watching.
-  bytes key = 1;
-  // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
-  // only the key argument is watched. If range_end is equal to '\0', all keys greater than
-  // or equal to the key argument are watched.
-  // If the range_end is one bit larger than the given key,
-  // then all keys with the prefix (the given key) will be watched.
-  bytes range_end = 2;
-  // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
-  int64 start_revision = 3;
-  // progress_notify is set so that the etcd server will periodically send a WatchResponse with
-  // no events to the new watcher if there are no recent events. It is useful when clients
-  // wish to recover a disconnected watcher starting from a recent known revision.
-  // The etcd server may decide how often it will send notifications based on current load.
-  bool progress_notify = 4;
-
-  enum FilterType {
-  // filter out put event.
-  NOPUT = 0;
-  // filter out delete event.
-  NODELETE = 1;
-  }
-  // filters filter the events on the server side before they are sent back to the watcher.
-  repeated FilterType filters = 5;
-
-  // If prev_kv is set, created watcher gets the previous KV before the event happens.
-  // If the previous KV is already compacted, nothing will be returned.
-  bool prev_kv = 6;
-
-  // If watch_id is provided and non-zero, it will be assigned to this watcher.
-  // Since creating a watcher in etcd is not a synchronous operation,
-  // this can be used to ensure that ordering is correct when creating multiple
-  // watchers on the same stream. Creating a watcher with an ID already in
-  // use on the stream will cause an error to be returned.
-  int64 watch_id = 7;
-
-  // fragment enables splitting large revisions into multiple watch responses.
-  bool fragment = 8;
-}
-
-message WatchCancelRequest {
-  // watch_id is the watcher id to cancel so that no more events are transmitted.
-  int64 watch_id = 1;
-}
-
-// Requests that a watch stream progress status be sent in the watch response stream as soon as
-// possible.
-message WatchProgressRequest {
-}
-
-message WatchResponse {
-  ResponseHeader header = 1;
-  // watch_id is the ID of the watcher that corresponds to the response.
-  int64 watch_id = 2;
-  // created is set to true if the response is for a create watch request.
-  // The client should record the watch_id and expect to receive events for
-  // the created watcher from the same stream.
-  // All events sent to the created watcher will attach with the same watch_id.
-  bool created = 3;
-  // canceled is set to true if the response is for a cancel watch request.
-  // No further events will be sent to the canceled watcher.
-  bool canceled = 4;
-  // compact_revision is set to the minimum index if a watcher tries to watch
-  // at a compacted index.
-  //
-  // This happens when creating a watcher at a compacted revision or the watcher cannot
-  // catch up with the progress of the key-value store.
-  //
-  // The client should treat the watcher as canceled and should not try to create any
-  // watcher with the same start_revision again.
-  int64 compact_revision  = 5;
-
-  // cancel_reason indicates the reason for canceling the watcher.
-  string cancel_reason = 6;
-
-  // fragment is true if a large watch response was split over multiple responses.
-  bool fragment = 7;
-
-  repeated mvccpb.Event events = 11;
-}
-
-message LeaseGrantRequest {
-  // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
-  int64 TTL = 1;
-  // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
-  int64 ID = 2;
-}
-
-message LeaseGrantResponse {
-  ResponseHeader header = 1;
-  // ID is the lease ID for the granted lease.
-  int64 ID = 2;
-  // TTL is the server chosen lease time-to-live in seconds.
-  int64 TTL = 3;
-  string error = 4;
-}
-
-message LeaseRevokeRequest {
-  // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
-  int64 ID = 1;
-}
-
-message LeaseRevokeResponse {
-  ResponseHeader header = 1;
-}
-
-message LeaseKeepAliveRequest {
-  // ID is the lease ID for the lease to keep alive.
-  int64 ID = 1;
-}
-
-message LeaseKeepAliveResponse {
-  ResponseHeader header = 1;
-  // ID is the lease ID from the keep alive request.
-  int64 ID = 2;
-  // TTL is the new time-to-live for the lease.
-  int64 TTL = 3;
-}
-
-message LeaseTimeToLiveRequest {
-  // ID is the lease ID for the lease.
-  int64 ID = 1;
-  // keys is true to query all the keys attached to this lease.
-  bool keys = 2;
-}
-
-message LeaseTimeToLiveResponse {
-  ResponseHeader header = 1;
-  // ID is the lease ID from the keep alive request.
-  int64 ID = 2;
-  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
-  int64 TTL = 3;
-  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
-  int64 grantedTTL = 4;
-  // Keys is the list of keys attached to this lease.
-  repeated bytes keys = 5;
-}
-
-message LeaseLeasesRequest {
-}
-
-message LeaseStatus {
-  int64 ID = 1;
-  // TODO: int64 TTL = 2;
-}
-
-message LeaseLeasesResponse {
-  ResponseHeader header = 1;
-  repeated LeaseStatus leases = 2;
-}
-
-message Member {
-  // ID is the member ID for this member.
-  uint64 ID = 1;
-  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
-  string name = 2;
-  // peerURLs is the list of URLs the member exposes to the cluster for communication.
-  repeated string peerURLs = 3;
-  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
-  repeated string clientURLs = 4;
-}
-
-message MemberAddRequest {
-  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
-  repeated string peerURLs = 1;
-}
-
-message MemberAddResponse {
-  ResponseHeader header = 1;
-  // member is the member information for the added member.
-  Member member = 2;
-  // members is a list of all members after adding the new member.
-  repeated Member members = 3;
-}
-
-message MemberRemoveRequest {
-  // ID is the member ID of the member to remove.
-  uint64 ID = 1;
-}
-
-message MemberRemoveResponse {
-  ResponseHeader header = 1;
-  // members is a list of all members after removing the member.
-  repeated Member members = 2;
-}
-
-message MemberUpdateRequest {
-  // ID is the member ID of the member to update.
-  uint64 ID = 1;
-  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
-  repeated string peerURLs = 2;
-}
-
-message MemberUpdateResponse{
-  ResponseHeader header = 1;
-  // members is a list of all members after updating the member.
-  repeated Member members = 2;
-}
-
-message MemberListRequest {
-}
-
-message MemberListResponse {
-  ResponseHeader header = 1;
-  // members is a list of all members associated with the cluster.
-  repeated Member members = 2;
-}
-
-message DefragmentRequest {
-}
-
-message DefragmentResponse {
-  ResponseHeader header = 1;
-}
-
-message MoveLeaderRequest {
-  // targetID is the node ID for the new leader.
-  uint64 targetID = 1;
-}
-
-message MoveLeaderResponse {
-  ResponseHeader header = 1;
-}
-
-enum AlarmType {
-	NONE = 0; // default, used to query if any alarm is active
-	NOSPACE = 1; // space quota is exhausted
-	CORRUPT = 2; // kv store corruption detected
-}
-
-message AlarmRequest {
-  enum AlarmAction {
-	GET = 0;
-	ACTIVATE = 1;
-	DEACTIVATE = 2;
-  }
-  // action is the kind of alarm request to issue. The action
-  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
-  // raised alarm.
-  AlarmAction action = 1;
-  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
-  // alarm request covers all members.
-  uint64 memberID = 2;
-  // alarm is the type of alarm to consider for this request.
-  AlarmType alarm = 3;
-}
-
-message AlarmMember {
-  // memberID is the ID of the member associated with the raised alarm.
-  uint64 memberID = 1;
-  // alarm is the type of alarm which has been raised.
-  AlarmType alarm = 2;
-}
-
-message AlarmResponse {
-  ResponseHeader header = 1;
-  // alarms is a list of alarms associated with the alarm request.
-  repeated AlarmMember alarms = 2;
-}
-
-message StatusRequest {
-}
-
-message StatusResponse {
-  ResponseHeader header = 1;
-  // version is the cluster protocol version used by the responding member.
-  string version = 2;
-  // dbSize is the size of the backend database, in bytes, of the responding member.
-  int64 dbSize = 3;
-  // leader is the member ID which the responding member believes is the current leader.
-  uint64 leader = 4;
-  // raftIndex is the current raft index of the responding member.
-  uint64 raftIndex = 5;
-  // raftTerm is the current raft term of the responding member.
-  uint64 raftTerm = 6;
-}
-
-message AuthEnableRequest {
-}
-
-message AuthDisableRequest {
-}
-
-message AuthenticateRequest {
-  string name = 1;
-  string password = 2;
-}
-
-message AuthUserAddRequest {
-  string name = 1;
-  string password = 2;
-}
-
-message AuthUserGetRequest {
-  string name = 1;
-}
-
-message AuthUserDeleteRequest {
-  // name is the name of the user to delete.
-  string name = 1;
-}
-
-message AuthUserChangePasswordRequest {
-  // name is the name of the user whose password is being changed.
-  string name = 1;
-  // password is the new password for the user.
-  string password = 2;
-}
-
-message AuthUserGrantRoleRequest {
-  // user is the name of the user which should be granted a given role.
-  string user = 1;
-  // role is the name of the role to grant to the user.
-  string role = 2;
-}
-
-message AuthUserRevokeRoleRequest {
-  string name = 1;
-  string role = 2;
-}
-
-message AuthRoleAddRequest {
-  // name is the name of the role to add to the authentication system.
-  string name = 1;
-}
-
-message AuthRoleGetRequest {
-  string role = 1;
-}
-
-message AuthUserListRequest {
-}
-
-message AuthRoleListRequest {
-}
-
-message AuthRoleDeleteRequest {
-  string role = 1;
-}
-
-message AuthRoleGrantPermissionRequest {
-  // name is the name of the role which will be granted the permission.
-  string name = 1;
-  // perm is the permission to grant to the role.
-  authpb.Permission perm = 2;
-}
-
-message AuthRoleRevokePermissionRequest {
-  string role = 1;
-  string key = 2;
-  string range_end = 3;
-}
-
-message AuthEnableResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthDisableResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthenticateResponse {
-  ResponseHeader header = 1;
-  // token is an authorized token that can be used in succeeding RPCs
-  string token = 2;
-}
-
-message AuthUserAddResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthUserGetResponse {
-  ResponseHeader header = 1;
-
-  repeated string roles = 2;
-}
-
-message AuthUserDeleteResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthUserChangePasswordResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthUserGrantRoleResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthUserRevokeRoleResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthRoleAddResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthRoleGetResponse {
-  ResponseHeader header = 1;
-
-  repeated authpb.Permission perm = 2;
-}
-
-message AuthRoleListResponse {
-  ResponseHeader header = 1;
-
-  repeated string roles = 2;
-}
-
-message AuthUserListResponse {
-  ResponseHeader header = 1;
-
-  repeated string users = 2;
-}
-
-message AuthRoleDeleteResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthRoleGrantPermissionResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthRoleRevokePermissionResponse {
-  ResponseHeader header = 1;
-}
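
The Compare/RequestOp/TxnRequest messages removed above are what the etcd v3 client's If/Then/Else transaction builder serializes on the wire. A minimal Go sketch of that mapping follows; the endpoint, key names, and import path are assumptions (this vendor tree uses github.com/coreos/etcd/clientv3, newer trees use go.etcd.io/etcd/clientv3).

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/coreos/etcd/clientv3" // newer trees: go.etcd.io/etcd/clientv3
    )

    func main() {
        // Endpoint is an assumption for illustration.
        cli, err := clientv3.New(clientv3.Config{
            Endpoints:   []string{"localhost:2379"},
            DialTimeout: 5 * time.Second,
        })
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        // If/Then/Else maps onto TxnRequest.compare/success/failure:
        // put the key only if it has never been created (create_revision == 0).
        resp, err := cli.Txn(ctx).
            If(clientv3.Compare(clientv3.CreateRevision("/locks/leader"), "=", 0)).
            Then(clientv3.OpPut("/locks/leader", "member-1")).
            Else(clientv3.OpGet("/locks/leader")).
            Commit()
        if err != nil {
            panic(err)
        }
        // Succeeded mirrors TxnResponse.succeeded.
        fmt.Println("compare succeeded:", resp.Succeeded)
    }
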
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
deleted file mode 100644
index 4679da5..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
+++ /dev/null
@@ -1,830 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: kv.proto
-
-package mvccpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Event_EventType int32
-
-const (
-	PUT    Event_EventType = 0
-	DELETE Event_EventType = 1
-)
-
-var Event_EventType_name = map[int32]string{
-	0: "PUT",
-	1: "DELETE",
-}
-
-var Event_EventType_value = map[string]int32{
-	"PUT":    0,
-	"DELETE": 1,
-}
-
-func (x Event_EventType) String() string {
-	return proto.EnumName(Event_EventType_name, int32(x))
-}
-
-func (Event_EventType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_2216fe83c9c12408, []int{1, 0}
-}
-
-type KeyValue struct {
-	// key is the key in bytes. An empty key is not allowed.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// create_revision is the revision of last creation on this key.
-	CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
-	// mod_revision is the revision of last modification on this key.
-	ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
-	// version is the version of the key. A deletion resets
-	// the version to zero and any modification of the key
-	// increases its version.
-	Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
-	// value is the value held by the key, in bytes.
-	Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
-	// lease is the ID of the lease that attached to key.
-	// When the attached lease expires, the key will be deleted.
-	// If lease is 0, then no lease is attached to the key.
-	Lease                int64    `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *KeyValue) Reset()         { *m = KeyValue{} }
-func (m *KeyValue) String() string { return proto.CompactTextString(m) }
-func (*KeyValue) ProtoMessage()    {}
-func (*KeyValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_2216fe83c9c12408, []int{0}
-}
-func (m *KeyValue) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *KeyValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_KeyValue.Merge(m, src)
-}
-func (m *KeyValue) XXX_Size() int {
-	return m.Size()
-}
-func (m *KeyValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_KeyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyValue proto.InternalMessageInfo
-
-type Event struct {
-	// type is the kind of event. If type is a PUT, it indicates
-	// new data has been stored to the key. If type is a DELETE,
-	// it indicates the key was deleted.
-	Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
-	// kv holds the KeyValue for the event.
-	// A PUT event contains current kv pair.
-	// A PUT event with kv.Version=1 indicates the creation of a key.
-	// A DELETE/EXPIRE event contains the deleted key with
-	// its modification revision set to the revision of deletion.
-	Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
-	// prev_kv holds the key-value pair before the event happens.
-	PrevKv               *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *Event) Reset()         { *m = Event{} }
-func (m *Event) String() string { return proto.CompactTextString(m) }
-func (*Event) ProtoMessage()    {}
-func (*Event) Descriptor() ([]byte, []int) {
-	return fileDescriptor_2216fe83c9c12408, []int{1}
-}
-func (m *Event) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Event.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Event) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Event.Merge(m, src)
-}
-func (m *Event) XXX_Size() int {
-	return m.Size()
-}
-func (m *Event) XXX_DiscardUnknown() {
-	xxx_messageInfo_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Event proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
-	proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
-	proto.RegisterType((*Event)(nil), "mvccpb.Event")
-}
-
-func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
-
-var fileDescriptor_2216fe83c9c12408 = []byte{
-	// 303 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
-	0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
-	0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
-	0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
-	0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
-	0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
-	0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
-	0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
-	0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
-	0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
-	0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
-	0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
-	0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
-	0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
-	0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
-	0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
-	0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
-	0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
-	0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
-}
-
-func (m *KeyValue) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Lease != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.Lease))
-		i--
-		dAtA[i] = 0x30
-	}
-	if len(m.Value) > 0 {
-		i -= len(m.Value)
-		copy(dAtA[i:], m.Value)
-		i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.Version != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.Version))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.ModRevision != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.CreateRevision != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
-		i--
-		dAtA[i] = 0x10
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Event) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Event) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.PrevKv != nil {
-		{
-			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintKv(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.Kv != nil {
-		{
-			size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintKv(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Type != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.Type))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
-	offset -= sovKv(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *KeyValue) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovKv(uint64(l))
-	}
-	if m.CreateRevision != 0 {
-		n += 1 + sovKv(uint64(m.CreateRevision))
-	}
-	if m.ModRevision != 0 {
-		n += 1 + sovKv(uint64(m.ModRevision))
-	}
-	if m.Version != 0 {
-		n += 1 + sovKv(uint64(m.Version))
-	}
-	l = len(m.Value)
-	if l > 0 {
-		n += 1 + l + sovKv(uint64(l))
-	}
-	if m.Lease != 0 {
-		n += 1 + sovKv(uint64(m.Lease))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Event) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Type != 0 {
-		n += 1 + sovKv(uint64(m.Type))
-	}
-	if m.Kv != nil {
-		l = m.Kv.Size()
-		n += 1 + l + sovKv(uint64(l))
-	}
-	if m.PrevKv != nil {
-		l = m.PrevKv.Size()
-		n += 1 + l + sovKv(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovKv(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozKv(x uint64) (n int) {
-	return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *KeyValue) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowKv
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthKv
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
-			}
-			m.CreateRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.CreateRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
-			}
-			m.ModRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ModRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
-			}
-			m.Version = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Version |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthKv
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
-			if m.Value == nil {
-				m.Value = []byte{}
-			}
-			iNdEx = postIndex
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
-			}
-			m.Lease = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Lease |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipKv(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthKv
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthKv
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Event) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowKv
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Event: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= Event_EventType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthKv
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Kv == nil {
-				m.Kv = &KeyValue{}
-			}
-			if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthKv
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.PrevKv == nil {
-				m.PrevKv = &KeyValue{}
-			}
-			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipKv(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthKv
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthKv
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipKv(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowKv
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthKv
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthKv
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowKv
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipKv(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthKv
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowKv   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
deleted file mode 100644
index 23c911b..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-syntax = "proto3";
-package mvccpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-message KeyValue {
-  // key is the key in bytes. An empty key is not allowed.
-  bytes key = 1;
-  // create_revision is the revision of last creation on this key.
-  int64 create_revision = 2;
-  // mod_revision is the revision of last modification on this key.
-  int64 mod_revision = 3;
-  // version is the version of the key. A deletion resets
-  // the version to zero and any modification of the key
-  // increases its version.
-  int64 version = 4;
-  // value is the value held by the key, in bytes.
-  bytes value = 5;
-  // lease is the ID of the lease attached to the key.
-  // When the attached lease expires, the key will be deleted.
-  // If lease is 0, then no lease is attached to the key.
-  int64 lease = 6;
-}
-
-message Event {
-  enum EventType {
-    PUT = 0;
-    DELETE = 1;
-  }
-  // type is the kind of event. If type is a PUT, it indicates
-  // new data has been stored to the key. If type is a DELETE,
-  // it indicates the key was deleted.
-  EventType type = 1;
-  // kv holds the KeyValue for the event.
-  // A PUT event contains current kv pair.
-  // A PUT event with kv.Version=1 indicates the creation of a key.
-  // A DELETE/EXPIRE event contains the deleted key with
-  // its modification revision set to the revision of deletion.
-  KeyValue kv = 2;
-
-  // prev_kv holds the key-value pair before the event happens.
-  KeyValue prev_kv = 3;
-}
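
The KeyValue and Event messages deleted above surface directly in watch results on the client side. A minimal sketch, assuming a reachable endpoint and the github.com/coreos/etcd import paths that match this vendor tree:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/coreos/etcd/clientv3" // newer trees: go.etcd.io/etcd/clientv3
        "github.com/coreos/etcd/mvcc/mvccpb"
    )

    func main() {
        // Endpoint is an assumption for illustration.
        cli, err := clientv3.New(clientv3.Config{
            Endpoints:   []string{"localhost:2379"},
            DialTimeout: 5 * time.Second,
        })
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // WithPrevKV asks the server to fill Event.prev_kv as described above.
        for wresp := range cli.Watch(context.Background(), "/config/", clientv3.WithPrefix(), clientv3.WithPrevKV()) {
            for _, ev := range wresp.Events {
                switch ev.Type {
                case mvccpb.PUT:
                    // Version == 1 means this PUT created the key.
                    fmt.Printf("PUT %s=%s (version=%d, mod_revision=%d)\n",
                        ev.Kv.Key, ev.Kv.Value, ev.Kv.Version, ev.Kv.ModRevision)
                case mvccpb.DELETE:
                    fmt.Printf("DELETE %s\n", ev.Kv.Key)
                    if ev.PrevKv != nil {
                        fmt.Printf("  previous value: %s\n", ev.PrevKv.Value)
                    }
                }
            }
        }
    }
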
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
deleted file mode 100644
index 81b0a9d..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"log"
-
-	"google.golang.org/grpc/grpclog"
-)
-
-// assert that "discardLogger" satisfies the "Logger" interface
-var _ Logger = &discardLogger{}
-
-// NewDiscardLogger returns a new Logger that discards everything except "fatal".
-func NewDiscardLogger() Logger { return &discardLogger{} }
-
-type discardLogger struct{}
-
-func (l *discardLogger) Info(args ...interface{})                    {}
-func (l *discardLogger) Infoln(args ...interface{})                  {}
-func (l *discardLogger) Infof(format string, args ...interface{})    {}
-func (l *discardLogger) Warning(args ...interface{})                 {}
-func (l *discardLogger) Warningln(args ...interface{})               {}
-func (l *discardLogger) Warningf(format string, args ...interface{}) {}
-func (l *discardLogger) Error(args ...interface{})                   {}
-func (l *discardLogger) Errorln(args ...interface{})                 {}
-func (l *discardLogger) Errorf(format string, args ...interface{})   {}
-func (l *discardLogger) Fatal(args ...interface{})                   { log.Fatal(args...) }
-func (l *discardLogger) Fatalln(args ...interface{})                 { log.Fatalln(args...) }
-func (l *discardLogger) Fatalf(format string, args ...interface{})   { log.Fatalf(format, args...) }
-func (l *discardLogger) V(lvl int) bool {
-	return false
-}
-func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l }
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/doc.go b/vendor/github.com/coreos/etcd/pkg/logutil/doc.go
deleted file mode 100644
index e919f24..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package logutil includes utilities to facilitate logging.
-package logutil
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go b/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
deleted file mode 100644
index d57e173..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"fmt"
-
-	"github.com/coreos/pkg/capnslog"
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-)
-
-var DefaultLogLevel = "info"
-
-// ConvertToZapLevel converts log level string to zapcore.Level.
-func ConvertToZapLevel(lvl string) zapcore.Level {
-	switch lvl {
-	case "debug":
-		return zap.DebugLevel
-	case "info":
-		return zap.InfoLevel
-	case "warn":
-		return zap.WarnLevel
-	case "error":
-		return zap.ErrorLevel
-	case "dpanic":
-		return zap.DPanicLevel
-	case "panic":
-		return zap.PanicLevel
-	case "fatal":
-		return zap.FatalLevel
-	default:
-		panic(fmt.Sprintf("unknown level %q", lvl))
-	}
-}
-
-// ConvertToCapnslogLogLevel converts a log level string to capnslog.LogLevel.
-// TODO: deprecate this in 3.5
-func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel {
-	switch lvl {
-	case "debug":
-		return capnslog.DEBUG
-	case "info":
-		return capnslog.INFO
-	case "warn":
-		return capnslog.WARNING
-	case "error":
-		return capnslog.ERROR
-	case "dpanic":
-		return capnslog.CRITICAL
-	case "panic":
-		return capnslog.CRITICAL
-	case "fatal":
-		return capnslog.CRITICAL
-	default:
-		panic(fmt.Sprintf("unknown level %q", lvl))
-	}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
deleted file mode 100644
index e7da80e..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import "google.golang.org/grpc/grpclog"
-
-// Logger defines logging interface.
-// TODO: deprecate in v3.5.
-type Logger interface {
-	grpclog.LoggerV2
-
-	// Lvl returns the logger if the logger's verbosity level is >= "lvl".
-	// Otherwise, it returns a logger that discards everything.
-	Lvl(lvl int) grpclog.LoggerV2
-}
-
-// assert that "defaultLogger" satisfies the "Logger" interface
-var _ Logger = &defaultLogger{}
-
-// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface.
-//
-// For example:
-//
-//  var defaultLogger Logger
-//  g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
-//  defaultLogger = NewLogger(g)
-//
-func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }
-
-type defaultLogger struct {
-	g grpclog.LoggerV2
-}
-
-func (l *defaultLogger) Info(args ...interface{})                    { l.g.Info(args...) }
-func (l *defaultLogger) Infoln(args ...interface{})                  { l.g.Info(args...) }
-func (l *defaultLogger) Infof(format string, args ...interface{})    { l.g.Infof(format, args...) }
-func (l *defaultLogger) Warning(args ...interface{})                 { l.g.Warning(args...) }
-func (l *defaultLogger) Warningln(args ...interface{})               { l.g.Warning(args...) }
-func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) }
-func (l *defaultLogger) Error(args ...interface{})                   { l.g.Error(args...) }
-func (l *defaultLogger) Errorln(args ...interface{})                 { l.g.Error(args...) }
-func (l *defaultLogger) Errorf(format string, args ...interface{})   { l.g.Errorf(format, args...) }
-func (l *defaultLogger) Fatal(args ...interface{})                   { l.g.Fatal(args...) }
-func (l *defaultLogger) Fatalln(args ...interface{})                 { l.g.Fatal(args...) }
-func (l *defaultLogger) Fatalf(format string, args ...interface{})   { l.g.Fatalf(format, args...) }
-func (l *defaultLogger) V(lvl int) bool                              { return l.g.V(lvl) }
-func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 {
-	if l.g.V(lvl) {
-		return l
-	}
-	return &discardLogger{}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
deleted file mode 100644
index 866b6f7..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/coreos/pkg/capnslog"
-)
-
-var (
-	defaultMergePeriod     = time.Second
-	defaultTimeOutputScale = 10 * time.Millisecond
-
-	outputInterval = time.Second
-)
-
-// line represents a log line that can be printed out
-// through capnslog.PackageLogger.
-type line struct {
-	level capnslog.LogLevel
-	str   string
-}
-
-func (l line) append(s string) line {
-	return line{
-		level: l.level,
-		str:   l.str + " " + s,
-	}
-}
-
-// status represents the merge status of a line.
-type status struct {
-	period time.Duration
-
-	start time.Time // start time of latest merge period
-	count int       // number of merged lines from starting
-}
-
-func (s *status) isInMergePeriod(now time.Time) bool {
-	return s.period == 0 || s.start.Add(s.period).After(now)
-}
-
-func (s *status) isEmpty() bool { return s.count == 0 }
-
-func (s *status) summary(now time.Time) string {
-	ts := s.start.Round(defaultTimeOutputScale)
-	took := now.Round(defaultTimeOutputScale).Sub(ts)
-	return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
-}
-
-func (s *status) reset(now time.Time) {
-	s.start = now
-	s.count = 0
-}
-
-// MergeLogger supports merge logging, which merges repeated log lines
-// and prints summary log lines instead.
-//
-// For merge logging, MergeLogger prints out the line when the line appears
-// at the first time. MergeLogger holds the same log line printed within
-// defaultMergePeriod, and prints out summary log line at the end of defaultMergePeriod.
-// It stops merging when the line doesn't appear within the
-// defaultMergePeriod.
-type MergeLogger struct {
-	*capnslog.PackageLogger
-
-	mu      sync.Mutex // protect statusm
-	statusm map[line]*status
-}
-
-func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
-	l := &MergeLogger{
-		PackageLogger: logger,
-		statusm:       make(map[line]*status),
-	}
-	go l.outputLoop()
-	return l
-}
-
-func (l *MergeLogger) MergeInfo(entries ...interface{}) {
-	l.merge(line{
-		level: capnslog.INFO,
-		str:   fmt.Sprint(entries...),
-	})
-}
-
-func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
-	l.merge(line{
-		level: capnslog.INFO,
-		str:   fmt.Sprintf(format, args...),
-	})
-}
-
-func (l *MergeLogger) MergeNotice(entries ...interface{}) {
-	l.merge(line{
-		level: capnslog.NOTICE,
-		str:   fmt.Sprint(entries...),
-	})
-}
-
-func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
-	l.merge(line{
-		level: capnslog.NOTICE,
-		str:   fmt.Sprintf(format, args...),
-	})
-}
-
-func (l *MergeLogger) MergeWarning(entries ...interface{}) {
-	l.merge(line{
-		level: capnslog.WARNING,
-		str:   fmt.Sprint(entries...),
-	})
-}
-
-func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
-	l.merge(line{
-		level: capnslog.WARNING,
-		str:   fmt.Sprintf(format, args...),
-	})
-}
-
-func (l *MergeLogger) MergeError(entries ...interface{}) {
-	l.merge(line{
-		level: capnslog.ERROR,
-		str:   fmt.Sprint(entries...),
-	})
-}
-
-func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
-	l.merge(line{
-		level: capnslog.ERROR,
-		str:   fmt.Sprintf(format, args...),
-	})
-}
-
-func (l *MergeLogger) merge(ln line) {
-	l.mu.Lock()
-
-	// increase count if the logger is merging the line
-	if status, ok := l.statusm[ln]; ok {
-		status.count++
-		l.mu.Unlock()
-		return
-	}
-
-	// initialize status of the line
-	l.statusm[ln] = &status{
-		period: defaultMergePeriod,
-		start:  time.Now(),
-	}
-	// release the lock before IO operation
-	l.mu.Unlock()
-	// print out the line the first time it appears
-	l.PackageLogger.Logf(ln.level, ln.str)
-}
-
-func (l *MergeLogger) outputLoop() {
-	for now := range time.Tick(outputInterval) {
-		var outputs []line
-
-		l.mu.Lock()
-		for ln, status := range l.statusm {
-			if status.isInMergePeriod(now) {
-				continue
-			}
-			if status.isEmpty() {
-				delete(l.statusm, ln)
-				continue
-			}
-			outputs = append(outputs, ln.append(status.summary(now)))
-			status.reset(now)
-		}
-		l.mu.Unlock()
-
-		for _, o := range outputs {
-			l.PackageLogger.Logf(o.level, o.str)
-		}
-	}
-}
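
The removed MergeLogger collapses repeated log lines into periodic "[merged N repeated lines in ...]" summaries. A small usage sketch based on its own doc comment; the repository and package names passed to capnslog are placeholders:

    package main

    import (
        "time"

        "github.com/coreos/etcd/pkg/logutil" // the package removed above
        "github.com/coreos/pkg/capnslog"
    )

    func main() {
        // Repository and package names are placeholders.
        plog := capnslog.NewPackageLogger("github.com/example/app", "demo")
        ml := logutil.NewMergeLogger(plog)

        // The first occurrence is printed immediately; repeats within
        // defaultMergePeriod are counted and later emitted as one summary line.
        for i := 0; i < 100; i++ {
            ml.MergeWarning("peer unreachable")
            time.Sleep(10 * time.Millisecond)
        }
    }
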
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
deleted file mode 100644
index 378bee0..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"github.com/coreos/pkg/capnslog"
-	"google.golang.org/grpc/grpclog"
-)
-
-// assert that "packageLogger" satisfies the "Logger" interface
-var _ Logger = &packageLogger{}
-
-// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface.
-//
-// For example:
-//
-//  var defaultLogger Logger
-//  defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot")
-//
-func NewPackageLogger(repo, pkg string) Logger {
-	return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
-}
-
-type packageLogger struct {
-	p *capnslog.PackageLogger
-}
-
-func (l *packageLogger) Info(args ...interface{})                    { l.p.Info(args...) }
-func (l *packageLogger) Infoln(args ...interface{})                  { l.p.Info(args...) }
-func (l *packageLogger) Infof(format string, args ...interface{})    { l.p.Infof(format, args...) }
-func (l *packageLogger) Warning(args ...interface{})                 { l.p.Warning(args...) }
-func (l *packageLogger) Warningln(args ...interface{})               { l.p.Warning(args...) }
-func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) }
-func (l *packageLogger) Error(args ...interface{})                   { l.p.Error(args...) }
-func (l *packageLogger) Errorln(args ...interface{})                 { l.p.Error(args...) }
-func (l *packageLogger) Errorf(format string, args ...interface{})   { l.p.Errorf(format, args...) }
-func (l *packageLogger) Fatal(args ...interface{})                   { l.p.Fatal(args...) }
-func (l *packageLogger) Fatalln(args ...interface{})                 { l.p.Fatal(args...) }
-func (l *packageLogger) Fatalf(format string, args ...interface{})   { l.p.Fatalf(format, args...) }
-func (l *packageLogger) V(lvl int) bool {
-	return l.p.LevelAt(capnslog.LogLevel(lvl))
-}
-func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 {
-	if l.p.LevelAt(capnslog.LogLevel(lvl)) {
-		return l
-	}
-	return &discardLogger{}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
deleted file mode 100644
index 2f69223..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"sort"
-
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-)
-
-// DefaultZapLoggerConfig defines default zap logger configuration.
-var DefaultZapLoggerConfig = zap.Config{
-	Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
-
-	Development: false,
-	Sampling: &zap.SamplingConfig{
-		Initial:    100,
-		Thereafter: 100,
-	},
-
-	Encoding: "json",
-
-	// copied from "zap.NewProductionEncoderConfig" with some updates
-	EncoderConfig: zapcore.EncoderConfig{
-		TimeKey:        "ts",
-		LevelKey:       "level",
-		NameKey:        "logger",
-		CallerKey:      "caller",
-		MessageKey:     "msg",
-		StacktraceKey:  "stacktrace",
-		LineEnding:     zapcore.DefaultLineEnding,
-		EncodeLevel:    zapcore.LowercaseLevelEncoder,
-		EncodeTime:     zapcore.ISO8601TimeEncoder,
-		EncodeDuration: zapcore.StringDurationEncoder,
-		EncodeCaller:   zapcore.ShortCallerEncoder,
-	},
-
-	// Use "/dev/null" to discard all
-	OutputPaths:      []string{"stderr"},
-	ErrorOutputPaths: []string{"stderr"},
-}
-
-// AddOutputPaths adds output paths to the existing output paths, resolving conflicts.
-func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config {
-	outputs := make(map[string]struct{})
-	for _, v := range cfg.OutputPaths {
-		outputs[v] = struct{}{}
-	}
-	for _, v := range outputPaths {
-		outputs[v] = struct{}{}
-	}
-	outputSlice := make([]string, 0)
-	if _, ok := outputs["/dev/null"]; ok {
-		// "/dev/null" to discard all
-		outputSlice = []string{"/dev/null"}
-	} else {
-		for k := range outputs {
-			outputSlice = append(outputSlice, k)
-		}
-	}
-	cfg.OutputPaths = outputSlice
-	sort.Strings(cfg.OutputPaths)
-
-	errOutputs := make(map[string]struct{})
-	for _, v := range cfg.ErrorOutputPaths {
-		errOutputs[v] = struct{}{}
-	}
-	for _, v := range errorOutputPaths {
-		errOutputs[v] = struct{}{}
-	}
-	errOutputSlice := make([]string, 0)
-	if _, ok := errOutputs["/dev/null"]; ok {
-		// "/dev/null" to discard all
-		errOutputSlice = []string{"/dev/null"}
-	} else {
-		for k := range errOutputs {
-			errOutputSlice = append(errOutputSlice, k)
-		}
-	}
-	cfg.ErrorOutputPaths = errOutputSlice
-	sort.Strings(cfg.ErrorOutputPaths)
-
-	return cfg
-}
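-
-// Illustrative usage sketch (not part of the original file): build a logger
-// from DefaultZapLoggerConfig with an extra log file added via AddOutputPaths.
-// The "/var/log/etcd.log" path is only an example.
-//
-//	cfg := logutil.AddOutputPaths(logutil.DefaultZapLoggerConfig, []string{"/var/log/etcd.log"}, nil)
-//	lg, err := cfg.Build()
-//	if err != nil {
-//		panic(err)
-//	}
-//	lg.Info("logger ready")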
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
deleted file mode 100644
index 3f48d81..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-	"google.golang.org/grpc/grpclog"
-)
-
-// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
-// It discards all INFO level logging in gRPC, if debug level
-// is not enabled in "*zap.Logger".
-func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
-	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
-	if err != nil {
-		return nil, err
-	}
-	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
-}
-
-// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core"
-// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC,
-// if debug level is not enabled in "*zap.Logger".
-func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
-	// "AddCallerSkip" to annotate caller outside of "logutil"
-	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
-	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-type zapGRPCLogger struct {
-	lg    *zap.Logger
-	sugar *zap.SugaredLogger
-}
-
-func (zl *zapGRPCLogger) Info(args ...interface{}) {
-	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
-		return
-	}
-	zl.sugar.Info(args...)
-}
-
-func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
-	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
-		return
-	}
-	zl.sugar.Info(args...)
-}
-
-func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
-	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
-		return
-	}
-	zl.sugar.Infof(format, args...)
-}
-
-func (zl *zapGRPCLogger) Warning(args ...interface{}) {
-	zl.sugar.Warn(args...)
-}
-
-func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
-	zl.sugar.Warn(args...)
-}
-
-func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
-	zl.sugar.Warnf(format, args...)
-}
-
-func (zl *zapGRPCLogger) Error(args ...interface{}) {
-	zl.sugar.Error(args...)
-}
-
-func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
-	zl.sugar.Error(args...)
-}
-
-func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
-	zl.sugar.Errorf(format, args...)
-}
-
-func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
-	zl.sugar.Fatal(args...)
-}
-
-func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
-	zl.sugar.Fatal(args...)
-}
-
-func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
-	zl.sugar.Fatalf(format, args...)
-}
-
-func (zl *zapGRPCLogger) V(l int) bool {
-	// infoLog == 0
-	if l <= 0 { // debug level, then we ignore info level in gRPC
-		return !zl.lg.Core().Enabled(zapcore.DebugLevel)
-	}
-	return true
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
deleted file mode 100644
index b1788bc..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package logutil
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-
-	"github.com/coreos/etcd/pkg/systemd"
-
-	"github.com/coreos/go-systemd/journal"
-	"go.uber.org/zap/zapcore"
-)
-
-// NewJournalWriter wraps "io.Writer" to redirect log output
-// to the local systemd journal. If the journald send fails, it falls
-// back to writing to the original writer.
-// The decode overhead is only <30µs per write.
-// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
-func NewJournalWriter(wr io.Writer) (io.Writer, error) {
-	return &journalWriter{Writer: wr}, systemd.DialJournal()
-}
-
-type journalWriter struct {
-	io.Writer
-}
-
-// WARN: assume that etcd uses default field names in zap encoder config
-// make sure to keep this up-to-date!
-type logLine struct {
-	Level  string `json:"level"`
-	Caller string `json:"caller"`
-}
-
-func (w *journalWriter) Write(p []byte) (int, error) {
-	line := &logLine{}
-	if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
-		return 0, err
-	}
-
-	var pri journal.Priority
-	switch line.Level {
-	case zapcore.DebugLevel.String():
-		pri = journal.PriDebug
-	case zapcore.InfoLevel.String():
-		pri = journal.PriInfo
-
-	case zapcore.WarnLevel.String():
-		pri = journal.PriWarning
-	case zapcore.ErrorLevel.String():
-		pri = journal.PriErr
-
-	case zapcore.DPanicLevel.String():
-		pri = journal.PriCrit
-	case zapcore.PanicLevel.String():
-		pri = journal.PriCrit
-	case zapcore.FatalLevel.String():
-		pri = journal.PriCrit
-
-	default:
-		panic(fmt.Errorf("unknown log level: %q", line.Level))
-	}
-
-	err := journal.Send(string(p), pri, map[string]string{
-		"PACKAGE":           filepath.Dir(line.Caller),
-		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
-	})
-	if err != nil {
-		// "journal" also falls back to stderr
-		// "fmt.Fprintln(os.Stderr, s)"
-		return w.Writer.Write(p)
-	}
-	return 0, nil
-}
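-
-// Illustrative usage sketch (not part of the original file): prefer journald
-// when it is reachable, otherwise keep writing to stderr.
-//
-//	var w io.Writer = os.Stderr
-//	if jw, err := logutil.NewJournalWriter(os.Stderr); err == nil {
-//		w = jw
-//	}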
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
deleted file mode 100644
index 012d688..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"errors"
-
-	"github.com/coreos/etcd/raft"
-
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-)
-
-// NewRaftLogger builds "raft.Logger" from "*zap.Config".
-func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
-	if lcfg == nil {
-		return nil, errors.New("nil zap.Config")
-	}
-	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
-	if err != nil {
-		return nil, err
-	}
-	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
-}
-
-// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
-func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
-	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
-// and "zapcore.WriteSyncer".
-func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
-	// "AddCallerSkip" to annotate caller outside of "logutil"
-	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
-	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-type zapRaftLogger struct {
-	lg    *zap.Logger
-	sugar *zap.SugaredLogger
-}
-
-func (zl *zapRaftLogger) Debug(args ...interface{}) {
-	zl.sugar.Debug(args...)
-}
-
-func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
-	zl.sugar.Debugf(format, args...)
-}
-
-func (zl *zapRaftLogger) Error(args ...interface{}) {
-	zl.sugar.Error(args...)
-}
-
-func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
-	zl.sugar.Errorf(format, args...)
-}
-
-func (zl *zapRaftLogger) Info(args ...interface{}) {
-	zl.sugar.Info(args...)
-}
-
-func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
-	zl.sugar.Infof(format, args...)
-}
-
-func (zl *zapRaftLogger) Warning(args ...interface{}) {
-	zl.sugar.Warn(args...)
-}
-
-func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
-	zl.sugar.Warnf(format, args...)
-}
-
-func (zl *zapRaftLogger) Fatal(args ...interface{}) {
-	zl.sugar.Fatal(args...)
-}
-
-func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
-	zl.sugar.Fatalf(format, args...)
-}
-
-func (zl *zapRaftLogger) Panic(args ...interface{}) {
-	zl.sugar.Panic(args...)
-}
-
-func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
-	zl.sugar.Panicf(format, args...)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/doc.go b/vendor/github.com/coreos/etcd/pkg/systemd/doc.go
deleted file mode 100644
index 30e77ce..0000000
--- a/vendor/github.com/coreos/etcd/pkg/systemd/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package systemd provides utility functions for systemd.
-package systemd
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go b/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
deleted file mode 100644
index b861c69..0000000
--- a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package systemd
-
-import "net"
-
-// DialJournal returns no error if the process can dial the journal socket.
-// Returns an error if the dial failed, which indicates journald is not available
-// (e.g. running embedded etcd as a docker daemon).
-// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
-func DialJournal() error {
-	conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
-	if conn != nil {
-		defer conn.Close()
-	}
-	return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go
deleted file mode 100644
index de8ef0b..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package types declares various data types and implements type-checking
-// functions.
-package types
diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go
deleted file mode 100644
index 1b042d9..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/id.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
-	"strconv"
-)
-
-// ID represents a generic identifier which is canonically
-// stored as a uint64 but is typically represented as a
-// base-16 string for input/output
-type ID uint64
-
-func (i ID) String() string {
-	return strconv.FormatUint(uint64(i), 16)
-}
-
-// IDFromString attempts to create an ID from a base-16 string.
-func IDFromString(s string) (ID, error) {
-	i, err := strconv.ParseUint(s, 16, 64)
-	return ID(i), err
-}
-
-// IDSlice implements the sort interface
-type IDSlice []ID
-
-func (p IDSlice) Len() int           { return len(p) }
-func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
-func (p IDSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go
deleted file mode 100644
index c111b0c..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/set.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
-	"reflect"
-	"sort"
-	"sync"
-)
-
-type Set interface {
-	Add(string)
-	Remove(string)
-	Contains(string) bool
-	Equals(Set) bool
-	Length() int
-	Values() []string
-	Copy() Set
-	Sub(Set) Set
-}
-
-func NewUnsafeSet(values ...string) *unsafeSet {
-	set := &unsafeSet{make(map[string]struct{})}
-	for _, v := range values {
-		set.Add(v)
-	}
-	return set
-}
-
-func NewThreadsafeSet(values ...string) *tsafeSet {
-	us := NewUnsafeSet(values...)
-	return &tsafeSet{us, sync.RWMutex{}}
-}
-
-type unsafeSet struct {
-	d map[string]struct{}
-}
-
-// Add adds a new value to the set (no-op if the value is already present)
-func (us *unsafeSet) Add(value string) {
-	us.d[value] = struct{}{}
-}
-
-// Remove removes the given value from the set
-func (us *unsafeSet) Remove(value string) {
-	delete(us.d, value)
-}
-
-// Contains returns whether the set contains the given value
-func (us *unsafeSet) Contains(value string) (exists bool) {
-	_, exists = us.d[value]
-	return exists
-}
-
-// ContainsAll returns whether the set contains all given values
-func (us *unsafeSet) ContainsAll(values []string) bool {
-	for _, s := range values {
-		if !us.Contains(s) {
-			return false
-		}
-	}
-	return true
-}
-
-// Equals returns whether the contents of two sets are identical
-func (us *unsafeSet) Equals(other Set) bool {
-	v1 := sort.StringSlice(us.Values())
-	v2 := sort.StringSlice(other.Values())
-	v1.Sort()
-	v2.Sort()
-	return reflect.DeepEqual(v1, v2)
-}
-
-// Length returns the number of elements in the set
-func (us *unsafeSet) Length() int {
-	return len(us.d)
-}
-
-// Values returns the values of the Set in an unspecified order.
-func (us *unsafeSet) Values() (values []string) {
-	values = make([]string, 0)
-	for val := range us.d {
-		values = append(values, val)
-	}
-	return values
-}
-
-// Copy creates a new Set containing the values of the first
-func (us *unsafeSet) Copy() Set {
-	cp := NewUnsafeSet()
-	for val := range us.d {
-		cp.Add(val)
-	}
-
-	return cp
-}
-
-// Sub removes all elements in other from the set
-func (us *unsafeSet) Sub(other Set) Set {
-	oValues := other.Values()
-	result := us.Copy().(*unsafeSet)
-
-	for _, val := range oValues {
-		if _, ok := result.d[val]; !ok {
-			continue
-		}
-		delete(result.d, val)
-	}
-
-	return result
-}
-
-type tsafeSet struct {
-	us *unsafeSet
-	m  sync.RWMutex
-}
-
-func (ts *tsafeSet) Add(value string) {
-	ts.m.Lock()
-	defer ts.m.Unlock()
-	ts.us.Add(value)
-}
-
-func (ts *tsafeSet) Remove(value string) {
-	ts.m.Lock()
-	defer ts.m.Unlock()
-	ts.us.Remove(value)
-}
-
-func (ts *tsafeSet) Contains(value string) (exists bool) {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	return ts.us.Contains(value)
-}
-
-func (ts *tsafeSet) Equals(other Set) bool {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	return ts.us.Equals(other)
-}
-
-func (ts *tsafeSet) Length() int {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	return ts.us.Length()
-}
-
-func (ts *tsafeSet) Values() (values []string) {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	return ts.us.Values()
-}
-
-func (ts *tsafeSet) Copy() Set {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	usResult := ts.us.Copy().(*unsafeSet)
-	return &tsafeSet{usResult, sync.RWMutex{}}
-}
-
-func (ts *tsafeSet) Sub(other Set) Set {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	usResult := ts.us.Sub(other).(*unsafeSet)
-	return &tsafeSet{usResult, sync.RWMutex{}}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go
deleted file mode 100644
index 0dd9ca7..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/slice.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-// Uint64Slice implements sort interface
-type Uint64Slice []uint64
-
-func (p Uint64Slice) Len() int           { return len(p) }
-func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go
deleted file mode 100644
index 9e5d03f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/urls.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
-	"errors"
-	"fmt"
-	"net"
-	"net/url"
-	"sort"
-	"strings"
-)
-
-type URLs []url.URL
-
-func NewURLs(strs []string) (URLs, error) {
-	all := make([]url.URL, len(strs))
-	if len(all) == 0 {
-		return nil, errors.New("no valid URLs given")
-	}
-	for i, in := range strs {
-		in = strings.TrimSpace(in)
-		u, err := url.Parse(in)
-		if err != nil {
-			return nil, err
-		}
-		if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
-			return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
-		}
-		if _, _, err := net.SplitHostPort(u.Host); err != nil {
-			return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
-		}
-		if u.Path != "" {
-			return nil, fmt.Errorf("URL must not contain a path: %s", in)
-		}
-		all[i] = *u
-	}
-	us := URLs(all)
-	us.Sort()
-
-	return us, nil
-}
-
-func MustNewURLs(strs []string) URLs {
-	urls, err := NewURLs(strs)
-	if err != nil {
-		panic(err)
-	}
-	return urls
-}
-
-func (us URLs) String() string {
-	return strings.Join(us.StringSlice(), ",")
-}
-
-func (us *URLs) Sort() {
-	sort.Sort(us)
-}
-func (us URLs) Len() int           { return len(us) }
-func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
-func (us URLs) Swap(i, j int)      { us[i], us[j] = us[j], us[i] }
-
-func (us URLs) StringSlice() []string {
-	out := make([]string, len(us))
-	for i := range us {
-		out[i] = us[i].String()
-	}
-
-	return out
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
deleted file mode 100644
index 47690cc..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// URLsMap is a map from a name to its URLs.
-type URLsMap map[string]URLs
-
-// NewURLsMap returns a URLsMap instantiated from the given string,
-// which consists of discovery-formatted names-to-URLs, like:
-// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
-func NewURLsMap(s string) (URLsMap, error) {
-	m := parse(s)
-
-	cl := URLsMap{}
-	for name, urls := range m {
-		us, err := NewURLs(urls)
-		if err != nil {
-			return nil, err
-		}
-		cl[name] = us
-	}
-	return cl, nil
-}
-
-// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
-// string values in the map can be multiple values separated by the sep string.
-func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
-	var err error
-	um := URLsMap{}
-	for k, v := range m {
-		um[k], err = NewURLs(strings.Split(v, sep))
-		if err != nil {
-			return nil, err
-		}
-	}
-	return um, nil
-}
-
-// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
-func (c URLsMap) String() string {
-	var pairs []string
-	for name, urls := range c {
-		for _, url := range urls {
-			pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
-		}
-	}
-	sort.Strings(pairs)
-	return strings.Join(pairs, ",")
-}
-
-// URLs returns a list of all URLs.
-// The returned list is sorted in ascending lexicographical order.
-func (c URLsMap) URLs() []string {
-	var urls []string
-	for _, us := range c {
-		for _, u := range us {
-			urls = append(urls, u.String())
-		}
-	}
-	sort.Strings(urls)
-	return urls
-}
-
-// Len returns the size of URLsMap.
-func (c URLsMap) Len() int {
-	return len(c)
-}
-
-// parse parses the given string and returns a map listing the values specified for each key.
-func parse(s string) map[string][]string {
-	m := make(map[string][]string)
-	for s != "" {
-		key := s
-		if i := strings.IndexAny(key, ","); i >= 0 {
-			key, s = key[:i], key[i+1:]
-		} else {
-			s = ""
-		}
-		if key == "" {
-			continue
-		}
-		value := ""
-		if i := strings.Index(key, "="); i >= 0 {
-			key, value = key[:i], key[i+1:]
-		}
-		m[key] = append(m[key], value)
-	}
-	return m
-}
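-
-// Illustrative example (not part of the original file) of how parse handles the
-// discovery format, including a repeated name:
-//
-//	parse("mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380")
-//	// => map[string][]string{
-//	//	"mach0": {"http://1.1.1.1:2380", "http://2.2.2.2:2380"},
-//	//	"mach1": {"http://3.3.3.3:2380"},
-//	// }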
diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md
deleted file mode 100644
index fde22b1..0000000
--- a/vendor/github.com/coreos/etcd/raft/README.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# Raft library
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more.
-
-Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
-
-To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
-
-In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine.  The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
-
-A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample
-
-# Features
-
-This raft implementation is a full-featured implementation of the Raft protocol. Features include:
-
-- Leader election
-- Log replication
-- Log compaction 
-- Membership changes
-- Leadership transfer extension
-- Efficient linearizable read-only queries served by both the leader and followers
-  - leader checks with quorum and bypasses Raft log before processing read-only queries
-  - followers ask the leader for a safe read index before processing read-only queries
-- More efficient lease-based linearizable read-only queries served by both the leader and followers
-  - leader bypasses the Raft log and processes read-only queries locally
-  - followers ask the leader for a safe read index before processing read-only queries
-  - this approach relies on the clocks of all the machines in the raft group
-
-This raft implementation also includes a few optional enhancements:
-
-- Optimistic pipelining to reduce log replication latency
-- Flow control for log replication
-- Batching Raft messages to reduce synchronized network I/O calls
-- Batching log entries to reduce disk synchronized I/O
-- Writing to leader's disk in parallel
-- Internal proposal redirection from followers to leader
-- Automatic stepping down when the leader loses quorum 
-
-## Notable Users
-
-- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
-- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
-- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
-- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
-- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
-- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
-
-## Usage
-
-The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
-
-To start a three-node cluster:
-```go
-  storage := raft.NewMemoryStorage()
-  c := &Config{
-    ID:              0x01,
-    ElectionTick:    10,
-    HeartbeatTick:   1,
-    Storage:         storage,
-    MaxSizePerMsg:   4096,
-    MaxInflightMsgs: 256,
-  }
-  // Set peer list to the other nodes in the cluster.
-  // Note that they need to be started separately as well.
-  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
-```
-
-Start a single-node cluster, like so:
-```go
-  // Create storage and config as shown above.
-  // Set peer list to itself, so this node can become the leader of this single-node cluster.
-  peers := []raft.Peer{{ID: 0x01}}
-  n := raft.StartNode(c, peers)
-```
-
-To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
-```go
-  // Create storage and config as shown above.
-  n := raft.StartNode(c, nil)
-```
-
-To restart a node from previous state:
-```go
-  storage := raft.NewMemoryStorage()
-
-  // Recover the in-memory storage from persistent snapshot, state and entries.
-  storage.ApplySnapshot(snapshot)
-  storage.SetHardState(state)
-  storage.Append(entries)
-
-  c := &Config{
-    ID:              0x01,
-    ElectionTick:    10,
-    HeartbeatTick:   1,
-    Storage:         storage,
-    MaxSizePerMsg:   4096,
-    MaxInflightMsgs: 256,
-  }
-
-  // Restart raft without peer information.
-  // Peer information is already included in the storage.
-  n := raft.RestartNode(c)
-```
-
-After creating a Node, the user has a few responsibilities:
-
-First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
-
-1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
-
-2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
-
-3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
-
-4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
-
-Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
-
-Third, after receiving a message from another node, pass it to Node.Step:
-
-```go
-	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
-		n.Step(ctx, m)
-	}
-```
-
-Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
-
-The total state machine handling loop will look something like this:
-
-```go
-  for {
-    select {
-    case <-s.Ticker:
-      n.Tick()
-    case rd := <-s.Node.Ready():
-      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
-      send(rd.Messages)
-      if !raft.IsEmptySnap(rd.Snapshot) {
-        processSnapshot(rd.Snapshot)
-      }
-      for _, entry := range rd.CommittedEntries {
-        process(entry)
-        if entry.Type == raftpb.EntryConfChange {
-          var cc raftpb.ConfChange
-          cc.Unmarshal(entry.Data)
-          s.Node.ApplyConfChange(cc)
-        }
-      }
-      s.Node.Advance()
-    case <-s.done:
-      return
-    }
-  }
-```
-
-To propose changes to the state machine, take application data, serialize it into a byte slice, and call:
-
-```go
-	n.Propose(ctx, data)
-```
-
-If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. 
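-
-One way an application might re-propose after a timeout is sketched below. This is illustrative only and not part of the library; `applied` stands in for an application-specific check that the proposed data has appeared in the committed entries:
-
-```go
-	// proposeWithRetry keeps proposing data until the application observes it
-	// in CommittedEntries, or until the caller's context is cancelled.
-	func proposeWithRetry(ctx context.Context, n raft.Node, data []byte, applied func([]byte) bool) error {
-		for !applied(data) {
-			cctx, cancel := context.WithTimeout(ctx, 3*time.Second)
-			_ = n.Propose(cctx, data) // the proposal may simply time out; retry below
-			cancel()
-			if ctx.Err() != nil {
-				return ctx.Err() // caller gave up
-			}
-			time.Sleep(100 * time.Millisecond) // give the proposal a chance to commit
-		}
-		return nil
-	}
-```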
-
-To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
-
-```go
-	n.ProposeConfChange(ctx, cc)
-```
-
-After the config change is committed, a committed entry with type raftpb.EntryConfChange will be returned. This must be applied to the node through:
-
-```go
-	var cc raftpb.ConfChange
-	cc.Unmarshal(data)
-	n.ApplyConfChange(cc)
-```
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-## Implementation notes
-
-This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
-
-To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
-
-This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
diff --git a/vendor/github.com/coreos/etcd/raft/design.md b/vendor/github.com/coreos/etcd/raft/design.md
deleted file mode 100644
index 7bc0531..0000000
--- a/vendor/github.com/coreos/etcd/raft/design.md
+++ /dev/null
@@ -1,57 +0,0 @@
-## Progress
-
-Progress represents a follower’s progress in the view of the leader. The leader maintains the progress of all followers and sends a `replication message` to each follower based on its progress.
-
-`replication message` is a `msgApp` with log entries.
-
-A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` to its latest one in the next `replication message`.
-
-A progress is in one of three states: `probe`, `replicate`, `snapshot`.
-
-```
-                            +--------------------------------------------------------+          
-                            |                  send snapshot                         |          
-                            |                                                        |          
-                  +---------+----------+                                  +----------v---------+
-              +--->       probe        |                                  |      snapshot      |
-              |   |  max inflight = 1  <----------------------------------+  max inflight = 0  |
-              |   +---------+----------+                                  +--------------------+
-              |             |            1. snapshot success                                    
-              |             |               (next=snapshot.index + 1)                           
-              |             |            2. snapshot failure                                    
-              |             |               (no change)                                         
-              |             |            3. receives msgAppResp(rej=false&&index>lastsnap.index)
-              |             |               (match=m.index,next=match+1)                        
-receives msgAppResp(rej=true)                                                                   
-(next=match+1)|             |                                                                   
-              |             |                                                                   
-              |             |                                                                   
-              |             |   receives msgAppResp(rej=false&&index>match)                     
-              |             |   (match=m.index,next=match+1)                                    
-              |             |                                                                   
-              |             |                                                                   
-              |             |                                                                   
-              |   +---------v----------+                                                        
-              |   |     replicate      |                                                        
-              +---+  max inflight = n  |                                                        
-                  +--------------------+                                                        
-```
-
-When the progress of a follower is in the `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message`s slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`.
-
-When the progress of a follower is in `replicate` state, leader sends `replication message`, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replicating log entries to the follower.
-
-When the progress of a follower is in `snapshot` state, leader stops sending any `replication message`.
-
-A newly elected leader sets the progresses of all the followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message` to the follower and probes its progress.
-
-A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress falls back to `probe` when the follower replies with a rejection `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see open question.)
-
-A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the success, failure or abortion of the previous snapshot sent. The progress will go back to `probe` after the sending result is applied.
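-
-The transitions above can be restated as a small sketch. This is illustrative only; the type and field names below are not the library's actual ones:
-
-```go
-type stateType int
-
-const (
-	probe stateType = iota
-	replicate
-	snapshot
-)
-
-type progress struct {
-	state       stateType
-	match, next uint64
-}
-
-// onAppResp sketches how a progress reacts to a msgAppResp from its follower.
-func (pr *progress) onAppResp(rejected bool, index uint64) {
-	if rejected {
-		pr.state = probe       // slow down and probe the follower again
-		pr.next = pr.match + 1 // aggressively reset next
-		return
-	}
-	pr.match, pr.next = index, index+1
-	pr.state = replicate // follower caught up; stream entries optimistically
-}
-```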
-
-### Flow Control
-
-1. Limit the max size of each message sent. The max should be configurable.
-This lowers the cost in the probing state, since we limit the size per message, and lowers the penalty when `next` is aggressively decreased to too low a value.
-
-2. Limit the number of in-flight messages to fewer than N when in the `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so the raft node is not blocked). We want to make sure raft does not overflow that buffer, which can cause messages to be dropped and trigger a bunch of unnecessary resends.
diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go
deleted file mode 100644
index b55c591..0000000
--- a/vendor/github.com/coreos/etcd/raft/doc.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package raft sends and receives messages in the Protocol Buffer format
-defined in the raftpb package.
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-A simple example application, _raftexample_, is also available to help illustrate
-how to use this package in practice:
-https://github.com/coreos/etcd/tree/master/contrib/raftexample
-
-Usage
-
-The primary object in raft is a Node. You either start a Node from scratch
-using raft.StartNode or start a Node from some initial state using raft.RestartNode.
-
-To start a node from scratch:
-
-  storage := raft.NewMemoryStorage()
-  c := &Config{
-    ID:              0x01,
-    ElectionTick:    10,
-    HeartbeatTick:   1,
-    Storage:         storage,
-    MaxSizePerMsg:   4096,
-    MaxInflightMsgs: 256,
-  }
-  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
-
-To restart a node from previous state:
-
-  storage := raft.NewMemoryStorage()
-
-  // recover the in-memory storage from persistent
-  // snapshot, state and entries.
-  storage.ApplySnapshot(snapshot)
-  storage.SetHardState(state)
-  storage.Append(entries)
-
-  c := &Config{
-    ID:              0x01,
-    ElectionTick:    10,
-    HeartbeatTick:   1,
-    Storage:         storage,
-    MaxSizePerMsg:   4096,
-    MaxInflightMsgs: 256,
-  }
-
-  // restart raft without peer information.
-  // peer information is already included in the storage.
-  n := raft.RestartNode(c)
-
-Now that you are holding onto a Node you have a few responsibilities:
-
-First, you must read from the Node.Ready() channel and process the updates
-it contains. These steps may be performed in parallel, except as noted in step
-2.
-
-1. Write HardState, Entries, and Snapshot to persistent storage if they are
-not empty. Note that when writing an Entry with Index i, any
-previously-persisted entries with Index >= i must be discarded.
-
-2. Send all Messages to the nodes named in the To field. It is important that
-no messages be sent until the latest HardState has been persisted to disk,
-and all Entries written by any previous Ready batch (Messages may be sent while
-entries from the same batch are being persisted). To reduce the I/O latency, an
-optimization can be applied to make leader write to disk in parallel with its
-followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
-MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
-large).
-
-Note: Marshalling messages is not thread-safe; it is important that you
-make sure that no new entries are persisted while marshalling.
-The easiest way to achieve this is to serialise the messages directly inside
-your main raft loop.
-
-3. Apply Snapshot (if any) and CommittedEntries to the state machine.
-If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
-to apply it to the node. The configuration change may be cancelled at this point
-by setting the NodeID field to zero before calling ApplyConfChange
-(but ApplyConfChange must be called one way or the other, and the decision to cancel
-must be based solely on the state machine and not external information such as
-the observed health of the node).
-
-4. Call Node.Advance() to signal readiness for the next batch of updates.
-This may be done at any time after step 1, although all updates must be processed
-in the order they were returned by Ready.
-
-Second, all persisted log entries must be made available via an
-implementation of the Storage interface. The provided MemoryStorage
-type can be used for this (if you repopulate its state upon a
-restart), or you can supply your own disk-backed implementation.
-
-Third, when you receive a message from another node, pass it to Node.Step:
-
-	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
-		n.Step(ctx, m)
-	}
-
-Finally, you need to call Node.Tick() at regular intervals (probably
-via a time.Ticker). Raft has two important timeouts: heartbeat and the
-election timeout. However, internally to the raft package time is
-represented by an abstract "tick".
-
-The total state machine handling loop will look something like this:
-
-  for {
-    select {
-    case <-s.Ticker:
-      n.Tick()
-    case rd := <-s.Node.Ready():
-      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
-      send(rd.Messages)
-      if !raft.IsEmptySnap(rd.Snapshot) {
-        processSnapshot(rd.Snapshot)
-      }
-      for _, entry := range rd.CommittedEntries {
-        process(entry)
-        if entry.Type == raftpb.EntryConfChange {
-          var cc raftpb.ConfChange
-          cc.Unmarshal(entry.Data)
-          s.Node.ApplyConfChange(cc)
-        }
-      }
-      s.Node.Advance()
-    case <-s.done:
-      return
-    }
-  }
-
-To propose changes to the state machine from your node, take your application
-data, serialize it into a byte slice, and call:
-
-	n.Propose(ctx, data)
-
-If the proposal is committed, data will appear in committed entries with type
-raftpb.EntryNormal. There is no guarantee that a proposed command will be
-committed; you may have to re-propose after a timeout.
-
-To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
-
-	n.ProposeConfChange(ctx, cc)
-
-After the config change is committed, a committed entry with type
-raftpb.EntryConfChange will be returned. You must apply it to the node through:
-
-	var cc raftpb.ConfChange
-	cc.Unmarshal(data)
-	n.ApplyConfChange(cc)
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-Implementation notes
-
-This implementation is up to date with the final Raft thesis
-(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
-implementation of the membership change protocol differs somewhat from
-that described in chapter 4. The key invariant that membership changes
-happen one node at a time is preserved, but in our implementation the
-membership change takes effect when its entry is applied, not when it
-is added to the log (so the entry is committed under the old
-membership instead of the new). This is equivalent in terms of safety,
-since the old and new configurations are guaranteed to overlap.
-
-To ensure that we do not attempt to commit two membership changes at
-once by matching log positions (which would be unsafe since they
-should have different quorum requirements), we simply disallow any
-proposed membership change while any uncommitted change appears in
-the leader's log.
-
-This approach introduces a problem when you try to remove a member
-from a two-member cluster: If one of the members dies before the
-other one receives the commit of the confchange entry, then the member
-cannot be removed any more since the cluster cannot make progress.
-For this reason it is highly recommended to use three or more nodes in
-every cluster.
-
-MessageType
-
-Package raft sends and receives messages in Protocol Buffer format (defined
-in raftpb package). Each state (follower, candidate, leader) implements its
-own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
-advancing with the given raftpb.Message. Each step is determined by its
-raftpb.MessageType. Note that every step is checked by one common method
-'Step' that safety-checks the terms of node and incoming message to prevent
-stale log entries:
-
-	'MsgHup' is used for election. If a node is a follower or candidate, the
-	'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
-	candidate has not received any heartbeat before the election timeout, it
-	passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
-	start a new election.
-
-	'MsgBeat' is an internal type that signals the leader to send a heartbeat of
-	the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
-	the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
-	send periodic 'MsgHeartbeat' messages to its followers.
-
-	'MsgProp' proposes to append data to its log entries. This is a special
-	type to redirect proposals to leader. Therefore, send method overwrites
-	raftpb.Message's term with its HardState's term to avoid attaching its
-	local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
-	method, the leader first calls the 'appendEntry' method to append entries
-	to its log, and then calls 'bcastAppend' method to send those entries to
-	its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
-	follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send
-	method. It is stored with sender's ID and later forwarded to leader by
-	rafthttp package.
-
-	'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
-	which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
-	type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
-	back to follower, because it indicates that there is a valid leader sending
-	'MsgApp' messages. Candidate and follower respond to this message in
-	'MsgAppResp' type.
-
-	'MsgAppResp' is response to log replication request('MsgApp'). When
-	'MsgApp' is passed to candidate or follower's Step method, it responds by
-	calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft
-	mailbox.
-
-	'MsgVote' requests votes for election. When a node is a follower or
-	candidate and 'MsgHup' is passed to its Step method, then the node calls
-	'campaign' method to campaign itself to become a leader. Once 'campaign'
-	method is called, the node becomes candidate and sends 'MsgVote' to peers
-	in cluster to request votes. When passed to leader or candidate's Step
-	method and the message's Term is lower than leader's or candidate's,
-	'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
-	If leader or candidate receives 'MsgVote' with higher term, it will revert
-	back to follower. When 'MsgVote' is passed to follower, it votes for the
-	sender only when sender's last term is greater than MsgVote's term or
-	sender's last term is equal to MsgVote's term but sender's last committed
-	index is greater than or equal to follower's.
-
-	'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is
-	passed to candidate, the candidate calculates how many votes it has won. If
-	it's more than majority (quorum), it becomes leader and calls 'bcastAppend'.
-	If the candidate receives a majority of denial votes, it reverts back to
-	follower.
-
-	'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
-	protocol. When Config.PreVote is true, a pre-election is carried out first
-	(using the same rules as a regular election), and no node increases its term
-	number unless the pre-election indicates that the campaigning node would win.
-	This minimizes disruption when a partitioned node rejoins the cluster.
-
-	'MsgSnap' requests to install a snapshot. When a node has just
-	become a leader or the leader receives 'MsgProp' message, it calls
-	'bcastAppend' method, which then calls 'sendAppend' method to each
-	follower. In 'sendAppend', if a leader fails to get term or entries,
-	the leader requests a snapshot by sending a 'MsgSnap' type message.
-
-	'MsgSnapStatus' reports the result of a snapshot install message. When a
-	follower rejects 'MsgSnap', it indicates that the snapshot request carried
-	by 'MsgSnap' failed, usually because network issues prevented the network
-	layer from delivering the snapshot to the follower. The leader then
-	considers the follower's progress as probe. When 'MsgSnap' is not rejected,
-	it indicates that the snapshot succeeded, and the leader sets the follower's
-	progress back to probe and resumes its log replication.
-
-	'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
-	to candidate and message's term is higher than candidate's, the candidate
-	reverts back to follower and updates its committed index from the one in
-	this heartbeat, and it sends the response to its mailbox. When
-	'MsgHeartbeat' is passed to follower's Step method and message's term is
-	higher than follower's, the follower updates its leaderID with the ID
-	from the message.
-
-	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
-	is passed to leader's Step method, the leader knows which follower
-	responded. Only when the leader's last committed index is greater than the
-	follower's Match index does the leader run the 'sendAppend' method.
-
-	'MsgUnreachable' tells that a request (message) wasn't delivered. When
-	'MsgUnreachable' is passed to leader's Step method, the leader discovers
-	that the follower that sent this 'MsgUnreachable' is not reachable, often
-	indicating 'MsgApp' is lost. When follower's progress state is replicate,
-	the leader sets it back to probe.
-
-*/
-package raft
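
The MessageType walkthrough above is easier to follow next to the application-side loop that feeds ticks, proposals, and Ready structs into a node. The sketch below is only illustrative: it assumes a single-voter cluster and the package's MemoryStorage, and the ticker interval, iteration count, and proposal payload are arbitrary; a real application would also send rd.Messages over its transport.

	package main

	import (
		"context"
		"time"

		"github.com/coreos/etcd/raft"
	)

	func main() {
		// Single-voter cluster backed by the package's in-memory storage.
		storage := raft.NewMemoryStorage()
		n := raft.StartNode(&raft.Config{
			ID:              0x01,
			ElectionTick:    10,
			HeartbeatTick:   1,
			Storage:         storage,
			MaxSizePerMsg:   1024 * 1024,
			MaxInflightMsgs: 256,
		}, []raft.Peer{{ID: 0x01}})
		defer n.Stop()

		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()

		proposed := false
		for i := 0; i < 100; i++ {
			select {
			case <-ticker.C:
				// Ticks drive tickElection/tickHeartbeat; enough election ticks
				// without a leader make the node pass MsgHup to itself.
				n.Tick()
			case rd := <-n.Ready():
				// Persist HardState and Entries before Messages would be sent,
				// then acknowledge the Ready with Advance.
				if !raft.IsEmptyHardState(rd.HardState) {
					_ = storage.SetHardState(rd.HardState)
				}
				_ = storage.Append(rd.Entries)
				n.Advance()
			}
			if !proposed && n.Status().RaftState == raft.StateLeader {
				// A proposal enters Step as MsgProp; the leader appends it to its
				// log and replicates it to followers as MsgApp.
				_ = n.Propose(context.TODO(), []byte("example"))
				proposed = true
			}
		}
	}
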
diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go
deleted file mode 100644
index c3036d3..0000000
--- a/vendor/github.com/coreos/etcd/raft/log.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-	"log"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type raftLog struct {
-	// storage contains all stable entries since the last snapshot.
-	storage Storage
-
-	// unstable contains all unstable entries and snapshot.
-	// they will be saved into storage.
-	unstable unstable
-
-	// committed is the highest log position that is known to be in
-	// stable storage on a quorum of nodes.
-	committed uint64
-	// applied is the highest log position that the application has
-	// been instructed to apply to its state machine.
-	// Invariant: applied <= committed
-	applied uint64
-
-	logger Logger
-}
-
-// newLog returns a log using the given storage. It recovers the log to the
-// state where it has just committed and applied the latest snapshot.
-func newLog(storage Storage, logger Logger) *raftLog {
-	if storage == nil {
-		log.Panic("storage must not be nil")
-	}
-	log := &raftLog{
-		storage: storage,
-		logger:  logger,
-	}
-	firstIndex, err := storage.FirstIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	lastIndex, err := storage.LastIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	log.unstable.offset = lastIndex + 1
-	log.unstable.logger = logger
-	// Initialize our committed and applied pointers to the time of the last compaction.
-	log.committed = firstIndex - 1
-	log.applied = firstIndex - 1
-
-	return log
-}
-
-func (l *raftLog) String() string {
-	return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
-}
-
-// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
-// it returns (last index of new entries, true).
-func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
-	if l.matchTerm(index, logTerm) {
-		lastnewi = index + uint64(len(ents))
-		ci := l.findConflict(ents)
-		switch {
-		case ci == 0:
-		case ci <= l.committed:
-			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
-		default:
-			offset := index + 1
-			l.append(ents[ci-offset:]...)
-		}
-		l.commitTo(min(committed, lastnewi))
-		return lastnewi, true
-	}
-	return 0, false
-}
-
-func (l *raftLog) append(ents ...pb.Entry) uint64 {
-	if len(ents) == 0 {
-		return l.lastIndex()
-	}
-	if after := ents[0].Index - 1; after < l.committed {
-		l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
-	}
-	l.unstable.truncateAndAppend(ents)
-	return l.lastIndex()
-}
-
-// findConflict finds the index of the conflict.
-// It returns the index of the first conflicting entry between the existing
-// entries and the given entries, if there is one.
-// If there are no conflicting entries, and the existing entries contain
-// all the given entries, zero will be returned.
-// If there are no conflicting entries, but the given entries contain new
-// entries, the index of the first new entry will be returned.
-// An entry is considered to be conflicting if it has the same index but
-// a different term.
-// The first entry MUST have an index equal to the argument 'from'.
-// The index of the given entries MUST be continuously increasing.
-func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
-	for _, ne := range ents {
-		if !l.matchTerm(ne.Index, ne.Term) {
-			if ne.Index <= l.lastIndex() {
-				l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
-					ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
-			}
-			return ne.Index
-		}
-	}
-	return 0
-}
-
-func (l *raftLog) unstableEntries() []pb.Entry {
-	if len(l.unstable.entries) == 0 {
-		return nil
-	}
-	return l.unstable.entries
-}
-
-// nextEnts returns all the available entries for execution.
-// If applied is smaller than the index of snapshot, it returns all committed
-// entries after the index of snapshot.
-func (l *raftLog) nextEnts() (ents []pb.Entry) {
-	off := max(l.applied+1, l.firstIndex())
-	if l.committed+1 > off {
-		ents, err := l.slice(off, l.committed+1, noLimit)
-		if err != nil {
-			l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
-		}
-		return ents
-	}
-	return nil
-}
-
-// hasNextEnts returns whether there are any available entries for execution. This
-// is a fast check without heavy raftLog.slice() in raftLog.nextEnts().
-func (l *raftLog) hasNextEnts() bool {
-	off := max(l.applied+1, l.firstIndex())
-	return l.committed+1 > off
-}
-
-func (l *raftLog) snapshot() (pb.Snapshot, error) {
-	if l.unstable.snapshot != nil {
-		return *l.unstable.snapshot, nil
-	}
-	return l.storage.Snapshot()
-}
-
-func (l *raftLog) firstIndex() uint64 {
-	if i, ok := l.unstable.maybeFirstIndex(); ok {
-		return i
-	}
-	index, err := l.storage.FirstIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	return index
-}
-
-func (l *raftLog) lastIndex() uint64 {
-	if i, ok := l.unstable.maybeLastIndex(); ok {
-		return i
-	}
-	i, err := l.storage.LastIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	return i
-}
-
-func (l *raftLog) commitTo(tocommit uint64) {
-	// never decrease commit
-	if l.committed < tocommit {
-		if l.lastIndex() < tocommit {
-			l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
-		}
-		l.committed = tocommit
-	}
-}
-
-func (l *raftLog) appliedTo(i uint64) {
-	if i == 0 {
-		return
-	}
-	if l.committed < i || i < l.applied {
-		l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
-	}
-	l.applied = i
-}
-
-func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
-
-func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
-
-func (l *raftLog) lastTerm() uint64 {
-	t, err := l.term(l.lastIndex())
-	if err != nil {
-		l.logger.Panicf("unexpected error when getting the last term (%v)", err)
-	}
-	return t
-}
-
-func (l *raftLog) term(i uint64) (uint64, error) {
-	// the valid term range is [index of dummy entry, last index]
-	dummyIndex := l.firstIndex() - 1
-	if i < dummyIndex || i > l.lastIndex() {
-		// TODO: return an error instead?
-		return 0, nil
-	}
-
-	if t, ok := l.unstable.maybeTerm(i); ok {
-		return t, nil
-	}
-
-	t, err := l.storage.Term(i)
-	if err == nil {
-		return t, nil
-	}
-	if err == ErrCompacted || err == ErrUnavailable {
-		return 0, err
-	}
-	panic(err) // TODO(bdarnell)
-}
-
-func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
-	if i > l.lastIndex() {
-		return nil, nil
-	}
-	return l.slice(i, l.lastIndex()+1, maxsize)
-}
-
-// allEntries returns all entries in the log.
-func (l *raftLog) allEntries() []pb.Entry {
-	ents, err := l.entries(l.firstIndex(), noLimit)
-	if err == nil {
-		return ents
-	}
-	if err == ErrCompacted { // try again if there was a racing compaction
-		return l.allEntries()
-	}
-	// TODO (xiangli): handle error?
-	panic(err)
-}
-
-// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
-// by comparing the index and term of the last entries in the existing logs.
-// If the logs have last entries with different terms, then the log with the
-// later term is more up-to-date. If the logs end with the same term, then
-// whichever log has the larger lastIndex is more up-to-date. If the logs are
-// the same, the given log is up-to-date.
-func (l *raftLog) isUpToDate(lasti, term uint64) bool {
-	return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
-}
-
-func (l *raftLog) matchTerm(i, term uint64) bool {
-	t, err := l.term(i)
-	if err != nil {
-		return false
-	}
-	return t == term
-}
-
-func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
-	if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
-		l.commitTo(maxIndex)
-		return true
-	}
-	return false
-}
-
-func (l *raftLog) restore(s pb.Snapshot) {
-	l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
-	l.committed = s.Metadata.Index
-	l.unstable.restore(s)
-}
-
-// slice returns a slice of log entries from lo through hi-1, inclusive.
-func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
-	err := l.mustCheckOutOfBounds(lo, hi)
-	if err != nil {
-		return nil, err
-	}
-	if lo == hi {
-		return nil, nil
-	}
-	var ents []pb.Entry
-	if lo < l.unstable.offset {
-		storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
-		if err == ErrCompacted {
-			return nil, err
-		} else if err == ErrUnavailable {
-			l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
-		} else if err != nil {
-			panic(err) // TODO(bdarnell)
-		}
-
-		// check if ents has reached the size limitation
-		if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
-			return storedEnts, nil
-		}
-
-		ents = storedEnts
-	}
-	if hi > l.unstable.offset {
-		unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
-		if len(ents) > 0 {
-			ents = append([]pb.Entry{}, ents...)
-			ents = append(ents, unstable...)
-		} else {
-			ents = unstable
-		}
-	}
-	return limitSize(ents, maxSize), nil
-}
-
-// l.firstIndex() <= lo <= hi <= l.lastIndex() + 1
-func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
-	if lo > hi {
-		l.logger.Panicf("invalid slice %d > %d", lo, hi)
-	}
-	fi := l.firstIndex()
-	if lo < fi {
-		return ErrCompacted
-	}
-
-	length := l.lastIndex() + 1 - fi
-	if lo < fi || hi > fi+length {
-		l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
-	}
-	return nil
-}
-
-func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
-	if err == nil {
-		return t
-	}
-	if err == ErrCompacted {
-		return 0
-	}
-	l.logger.Panicf("unexpected error (%v)", err)
-	return 0
-}
diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go
deleted file mode 100644
index 263af9c..0000000
--- a/vendor/github.com/coreos/etcd/raft/log_unstable.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import pb "github.com/coreos/etcd/raft/raftpb"
-
-// unstable.entries[i] has raft log position i+unstable.offset.
-// Note that unstable.offset may be less than the highest log
-// position in storage; this means that the next write to storage
-// might need to truncate the log before persisting unstable.entries.
-type unstable struct {
-	// the incoming unstable snapshot, if any.
-	snapshot *pb.Snapshot
-	// all entries that have not yet been written to storage.
-	entries []pb.Entry
-	offset  uint64
-
-	logger Logger
-}
-
-// maybeFirstIndex returns the index of the first possible entry in entries
-// if it has a snapshot.
-func (u *unstable) maybeFirstIndex() (uint64, bool) {
-	if u.snapshot != nil {
-		return u.snapshot.Metadata.Index + 1, true
-	}
-	return 0, false
-}
-
-// maybeLastIndex returns the last index if it has at least one
-// unstable entry or snapshot.
-func (u *unstable) maybeLastIndex() (uint64, bool) {
-	if l := len(u.entries); l != 0 {
-		return u.offset + uint64(l) - 1, true
-	}
-	if u.snapshot != nil {
-		return u.snapshot.Metadata.Index, true
-	}
-	return 0, false
-}
-
-// maybeTerm returns the term of the entry at index i, if there
-// is any.
-func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
-	if i < u.offset {
-		if u.snapshot == nil {
-			return 0, false
-		}
-		if u.snapshot.Metadata.Index == i {
-			return u.snapshot.Metadata.Term, true
-		}
-		return 0, false
-	}
-
-	last, ok := u.maybeLastIndex()
-	if !ok {
-		return 0, false
-	}
-	if i > last {
-		return 0, false
-	}
-	return u.entries[i-u.offset].Term, true
-}
-
-func (u *unstable) stableTo(i, t uint64) {
-	gt, ok := u.maybeTerm(i)
-	if !ok {
-		return
-	}
-	// if i < offset, term is matched with the snapshot
-	// only update the unstable entries if term is matched with
-	// an unstable entry.
-	if gt == t && i >= u.offset {
-		u.entries = u.entries[i+1-u.offset:]
-		u.offset = i + 1
-		u.shrinkEntriesArray()
-	}
-}
-
-// shrinkEntriesArray discards the underlying array used by the entries slice
-// if most of it isn't being used. This avoids holding references to a bunch of
-// potentially large entries that aren't needed anymore. Simply clearing the
-// entries wouldn't be safe because clients might still be using them.
-func (u *unstable) shrinkEntriesArray() {
-	// We replace the array if we're using less than half of the space in
-	// it. This number is fairly arbitrary, chosen as an attempt to balance
-	// memory usage vs number of allocations. It could probably be improved
-	// with some focused tuning.
-	const lenMultiple = 2
-	if len(u.entries) == 0 {
-		u.entries = nil
-	} else if len(u.entries)*lenMultiple < cap(u.entries) {
-		newEntries := make([]pb.Entry, len(u.entries))
-		copy(newEntries, u.entries)
-		u.entries = newEntries
-	}
-}
-
-func (u *unstable) stableSnapTo(i uint64) {
-	if u.snapshot != nil && u.snapshot.Metadata.Index == i {
-		u.snapshot = nil
-	}
-}
-
-func (u *unstable) restore(s pb.Snapshot) {
-	u.offset = s.Metadata.Index + 1
-	u.entries = nil
-	u.snapshot = &s
-}
-
-func (u *unstable) truncateAndAppend(ents []pb.Entry) {
-	after := ents[0].Index
-	switch {
-	case after == u.offset+uint64(len(u.entries)):
-		// after is the next index in the u.entries
-		// directly append
-		u.entries = append(u.entries, ents...)
-	case after <= u.offset:
-		u.logger.Infof("replace the unstable entries from index %d", after)
-		// The log is being truncated to before our current offset
-		// portion, so set the offset and replace the entries
-		u.offset = after
-		u.entries = ents
-	default:
-		// truncate to after and copy to u.entries
-		// then append
-		u.logger.Infof("truncate the unstable entries before index %d", after)
-		u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
-		u.entries = append(u.entries, ents...)
-	}
-}
-
-func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
-	u.mustCheckOutOfBounds(lo, hi)
-	return u.entries[lo-u.offset : hi-u.offset]
-}
-
-// u.offset <= lo <= hi <= u.offset+len(u.entries)
-func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
-	if lo > hi {
-		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
-	}
-	upper := u.offset + uint64(len(u.entries))
-	if lo < u.offset || hi > upper {
-		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
-	}
-}
diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go
deleted file mode 100644
index 426a77d..0000000
--- a/vendor/github.com/coreos/etcd/raft/logger.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-)
-
-type Logger interface {
-	Debug(v ...interface{})
-	Debugf(format string, v ...interface{})
-
-	Error(v ...interface{})
-	Errorf(format string, v ...interface{})
-
-	Info(v ...interface{})
-	Infof(format string, v ...interface{})
-
-	Warning(v ...interface{})
-	Warningf(format string, v ...interface{})
-
-	Fatal(v ...interface{})
-	Fatalf(format string, v ...interface{})
-
-	Panic(v ...interface{})
-	Panicf(format string, v ...interface{})
-}
-
-func SetLogger(l Logger) { raftLogger = l }
-
-var (
-	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
-	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
-	raftLogger    = Logger(defaultLogger)
-)
-
-const (
-	calldepth = 2
-)
-
-// DefaultLogger is a default implementation of the Logger interface.
-type DefaultLogger struct {
-	*log.Logger
-	debug bool
-}
-
-func (l *DefaultLogger) EnableTimestamps() {
-	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
-}
-
-func (l *DefaultLogger) EnableDebug() {
-	l.debug = true
-}
-
-func (l *DefaultLogger) Debug(v ...interface{}) {
-	if l.debug {
-		l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
-	}
-}
-
-func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
-	if l.debug {
-		l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
-	}
-}
-
-func (l *DefaultLogger) Info(v ...interface{}) {
-	l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Infof(format string, v ...interface{}) {
-	l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Error(v ...interface{}) {
-	l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
-	l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Warning(v ...interface{}) {
-	l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
-	l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Fatal(v ...interface{}) {
-	l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
-	os.Exit(1)
-}
-
-func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
-	l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
-	os.Exit(1)
-}
-
-func (l *DefaultLogger) Panic(v ...interface{}) {
-	l.Logger.Panic(v...)
-}
-
-func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
-	l.Logger.Panicf(format, v...)
-}
-
-func header(lvl, msg string) string {
-	return fmt.Sprintf("%s: %s", lvl, msg)
-}
diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go
deleted file mode 100644
index 33a9db8..0000000
--- a/vendor/github.com/coreos/etcd/raft/node.go
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"context"
-	"errors"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type SnapshotStatus int
-
-const (
-	SnapshotFinish  SnapshotStatus = 1
-	SnapshotFailure SnapshotStatus = 2
-)
-
-var (
-	emptyState = pb.HardState{}
-
-	// ErrStopped is returned by methods on Nodes that have been stopped.
-	ErrStopped = errors.New("raft: stopped")
-)
-
-// SoftState provides state that is useful for logging and debugging.
-// The state is volatile and does not need to be persisted to the WAL.
-type SoftState struct {
-	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
-	RaftState StateType
-}
-
-func (a *SoftState) equal(b *SoftState) bool {
-	return a.Lead == b.Lead && a.RaftState == b.RaftState
-}
-
-// Ready encapsulates the entries and messages that are ready to read,
-// be saved to stable storage, committed or sent to other peers.
-// All fields in Ready are read-only.
-type Ready struct {
-	// The current volatile state of a Node.
-	// SoftState will be nil if there is no update.
-	// It is not required to consume or store SoftState.
-	*SoftState
-
-	// The current state of a Node to be saved to stable storage BEFORE
-	// Messages are sent.
-	// HardState will be equal to empty state if there is no update.
-	pb.HardState
-
-	// ReadStates can be used for node to serve linearizable read requests locally
-	// when its applied index is greater than the index in ReadState.
-	// Note that the readState will be returned when raft receives msgReadIndex.
-	// The returned state is only valid for the request that requested to read.
-	ReadStates []ReadState
-
-	// Entries specifies entries to be saved to stable storage BEFORE
-	// Messages are sent.
-	Entries []pb.Entry
-
-	// Snapshot specifies the snapshot to be saved to stable storage.
-	Snapshot pb.Snapshot
-
-	// CommittedEntries specifies entries to be committed to a
-	// store/state-machine. These have previously been committed to stable
-	// store.
-	CommittedEntries []pb.Entry
-
-	// Messages specifies outbound messages to be sent AFTER Entries are
-	// committed to stable storage.
-	// If it contains a MsgSnap message, the application MUST report back to raft
-	// when the snapshot has been received or has failed by calling ReportSnapshot.
-	Messages []pb.Message
-
-	// MustSync indicates whether the HardState and Entries must be synchronously
-	// written to disk or if an asynchronous write is permissible.
-	MustSync bool
-}
-
-func isHardStateEqual(a, b pb.HardState) bool {
-	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
-}
-
-// IsEmptyHardState returns true if the given HardState is empty.
-func IsEmptyHardState(st pb.HardState) bool {
-	return isHardStateEqual(st, emptyState)
-}
-
-// IsEmptySnap returns true if the given Snapshot is empty.
-func IsEmptySnap(sp pb.Snapshot) bool {
-	return sp.Metadata.Index == 0
-}
-
-func (rd Ready) containsUpdates() bool {
-	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
-		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
-		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
-}
-
-// Node represents a node in a raft cluster.
-type Node interface {
-	// Tick increments the internal logical clock for the Node by a single tick. Election
-	// timeouts and heartbeat timeouts are in units of ticks.
-	Tick()
-	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
-	Campaign(ctx context.Context) error
-	// Propose proposes that data be appended to the log.
-	Propose(ctx context.Context, data []byte) error
-	// ProposeConfChange proposes config change.
-	// At most one ConfChange can be in the process of going through consensus.
-	// Application needs to call ApplyConfChange when applying EntryConfChange type entry.
-	ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
-	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
-	Step(ctx context.Context, msg pb.Message) error
-
-	// Ready returns a channel that returns the current point-in-time state.
-	// Users of the Node must call Advance after retrieving the state returned by Ready.
-	//
-	// NOTE: No committed entries from the next Ready may be applied until all committed entries
-	// and snapshots from the previous one have finished.
-	Ready() <-chan Ready
-
-	// Advance notifies the Node that the application has saved progress up to the last Ready.
-	// It prepares the node to return the next available Ready.
-	//
-	// The application should generally call Advance after it applies the entries in last Ready.
-	//
-	// However, as an optimization, the application may call Advance while it is applying the
-	// commands. For example, when the last Ready contains a snapshot, the application might take
-	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
-	// progress, it can call Advance before finishing applying the last ready.
-	Advance()
-	// ApplyConfChange applies config change to the local node.
-	// Returns an opaque ConfState protobuf which must be recorded
-	// in snapshots. Will never return nil; it returns a pointer only
-	// to match MemoryStorage.Compact.
-	ApplyConfChange(cc pb.ConfChange) *pb.ConfState
-
-	// TransferLeadership attempts to transfer leadership to the given transferee.
-	TransferLeadership(ctx context.Context, lead, transferee uint64)
-
-	// ReadIndex requests a read state. The read state will be set in the ready.
-	// Read state has a read index. Once the application advances further than the read
-	// index, any linearizable read requests issued before the read request can be
-	// processed safely. The read state will have the same rctx attached.
-	ReadIndex(ctx context.Context, rctx []byte) error
-
-	// Status returns the current status of the raft state machine.
-	Status() Status
-	// ReportUnreachable reports the given node is not reachable for the last send.
-	ReportUnreachable(id uint64)
-	// ReportSnapshot reports the status of the sent snapshot.
-	ReportSnapshot(id uint64, status SnapshotStatus)
-	// Stop performs any necessary termination of the Node.
-	Stop()
-}
-
-type Peer struct {
-	ID      uint64
-	Context []byte
-}
-
-// StartNode returns a new Node given configuration and a list of raft peers.
-// It appends a ConfChangeAddNode entry for each given peer to the initial log.
-func StartNode(c *Config, peers []Peer) Node {
-	r := newRaft(c)
-	// become the follower at term 1 and apply initial configuration
-	// entries of term 1
-	r.becomeFollower(1, None)
-	for _, peer := range peers {
-		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
-		d, err := cc.Marshal()
-		if err != nil {
-			panic("unexpected marshal error")
-		}
-		e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
-		r.raftLog.append(e)
-	}
-	// Mark these initial entries as committed.
-	// TODO(bdarnell): These entries are still unstable; do we need to preserve
-	// the invariant that committed < unstable?
-	r.raftLog.committed = r.raftLog.lastIndex()
-	// Now apply them, mainly so that the application can call Campaign
-	// immediately after StartNode in tests. Note that these nodes will
-	// be added to raft twice: here and when the application's Ready
-	// loop calls ApplyConfChange. The calls to addNode must come after
-	// all calls to raftLog.append so progress.next is set after these
-	// bootstrapping entries (it is an error if we try to append these
-	// entries since they have already been committed).
-	// We do not set raftLog.applied so the application will be able
-	// to observe all conf changes via Ready.CommittedEntries.
-	for _, peer := range peers {
-		r.addNode(peer.ID)
-	}
-
-	n := newNode()
-	n.logger = c.Logger
-	go n.run(r)
-	return &n
-}
-
-// RestartNode is similar to StartNode but does not take a list of peers.
-// The current membership of the cluster will be restored from the Storage.
-// If the caller has an existing state machine, pass in the last log index that
-// has been applied to it; otherwise use zero.
-func RestartNode(c *Config) Node {
-	r := newRaft(c)
-
-	n := newNode()
-	n.logger = c.Logger
-	go n.run(r)
-	return &n
-}
-
-// node is the canonical implementation of the Node interface
-type node struct {
-	propc      chan pb.Message
-	recvc      chan pb.Message
-	confc      chan pb.ConfChange
-	confstatec chan pb.ConfState
-	readyc     chan Ready
-	advancec   chan struct{}
-	tickc      chan struct{}
-	done       chan struct{}
-	stop       chan struct{}
-	status     chan chan Status
-
-	logger Logger
-}
-
-func newNode() node {
-	return node{
-		propc:      make(chan pb.Message),
-		recvc:      make(chan pb.Message),
-		confc:      make(chan pb.ConfChange),
-		confstatec: make(chan pb.ConfState),
-		readyc:     make(chan Ready),
-		advancec:   make(chan struct{}),
-		// make tickc a buffered chan, so raft node can buffer some ticks when the node
-		// is busy processing raft messages. Raft node will resume processing buffered
-		// ticks when it becomes idle.
-		tickc:  make(chan struct{}, 128),
-		done:   make(chan struct{}),
-		stop:   make(chan struct{}),
-		status: make(chan chan Status),
-	}
-}
-
-func (n *node) Stop() {
-	select {
-	case n.stop <- struct{}{}:
-		// Not already stopped, so trigger it
-	case <-n.done:
-		// Node has already been stopped - no need to do anything
-		return
-	}
-	// Block until the stop has been acknowledged by run()
-	<-n.done
-}
-
-func (n *node) run(r *raft) {
-	var propc chan pb.Message
-	var readyc chan Ready
-	var advancec chan struct{}
-	var prevLastUnstablei, prevLastUnstablet uint64
-	var havePrevLastUnstablei bool
-	var prevSnapi uint64
-	var rd Ready
-
-	lead := None
-	prevSoftSt := r.softState()
-	prevHardSt := emptyState
-
-	for {
-		if advancec != nil {
-			readyc = nil
-		} else {
-			rd = newReady(r, prevSoftSt, prevHardSt)
-			if rd.containsUpdates() {
-				readyc = n.readyc
-			} else {
-				readyc = nil
-			}
-		}
-
-		if lead != r.lead {
-			if r.hasLeader() {
-				if lead == None {
-					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
-				} else {
-					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
-				}
-				propc = n.propc
-			} else {
-				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
-				propc = nil
-			}
-			lead = r.lead
-		}
-
-		select {
-		// TODO: maybe buffer the config propose if there exists one (the way
-		// described in raft dissertation)
-		// Currently it is dropped in Step silently.
-		case m := <-propc:
-			m.From = r.id
-			r.Step(m)
-		case m := <-n.recvc:
-			// filter out response message from unknown From.
-			if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
-				r.Step(m) // raft never returns an error
-			}
-		case cc := <-n.confc:
-			if cc.NodeID == None {
-				r.resetPendingConf()
-				select {
-				case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
-				case <-n.done:
-				}
-				break
-			}
-			switch cc.Type {
-			case pb.ConfChangeAddNode:
-				r.addNode(cc.NodeID)
-			case pb.ConfChangeAddLearnerNode:
-				r.addLearner(cc.NodeID)
-			case pb.ConfChangeRemoveNode:
-				// block incoming proposal when local node is
-				// removed
-				if cc.NodeID == r.id {
-					propc = nil
-				}
-				r.removeNode(cc.NodeID)
-			case pb.ConfChangeUpdateNode:
-				r.resetPendingConf()
-			default:
-				panic("unexpected conf type")
-			}
-			select {
-			case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
-			case <-n.done:
-			}
-		case <-n.tickc:
-			r.tick()
-		case readyc <- rd:
-			if rd.SoftState != nil {
-				prevSoftSt = rd.SoftState
-			}
-			if len(rd.Entries) > 0 {
-				prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
-				prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
-				havePrevLastUnstablei = true
-			}
-			if !IsEmptyHardState(rd.HardState) {
-				prevHardSt = rd.HardState
-			}
-			if !IsEmptySnap(rd.Snapshot) {
-				prevSnapi = rd.Snapshot.Metadata.Index
-			}
-
-			r.msgs = nil
-			r.readStates = nil
-			advancec = n.advancec
-		case <-advancec:
-			if prevHardSt.Commit != 0 {
-				r.raftLog.appliedTo(prevHardSt.Commit)
-			}
-			if havePrevLastUnstablei {
-				r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
-				havePrevLastUnstablei = false
-			}
-			r.raftLog.stableSnapTo(prevSnapi)
-			advancec = nil
-		case c := <-n.status:
-			c <- getStatus(r)
-		case <-n.stop:
-			close(n.done)
-			return
-		}
-	}
-}
-
-// Tick increments the internal logical clock for this Node. Election timeouts
-// and heartbeat timeouts are in units of ticks.
-func (n *node) Tick() {
-	select {
-	case n.tickc <- struct{}{}:
-	case <-n.done:
-	default:
-		n.logger.Warningf("A tick missed to fire. Node blocks too long!")
-	}
-}
-
-func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
-
-func (n *node) Propose(ctx context.Context, data []byte) error {
-	return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
-}
-
-func (n *node) Step(ctx context.Context, m pb.Message) error {
-	// ignore unexpected local messages received over the network
-	if IsLocalMsg(m.Type) {
-		// TODO: return an error?
-		return nil
-	}
-	return n.step(ctx, m)
-}
-
-func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
-	data, err := cc.Marshal()
-	if err != nil {
-		return err
-	}
-	return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
-}
-
-// Step advances the state machine using msgs. The ctx.Err() will be returned,
-// if any.
-func (n *node) step(ctx context.Context, m pb.Message) error {
-	ch := n.recvc
-	if m.Type == pb.MsgProp {
-		ch = n.propc
-	}
-
-	select {
-	case ch <- m:
-		return nil
-	case <-ctx.Done():
-		return ctx.Err()
-	case <-n.done:
-		return ErrStopped
-	}
-}
-
-func (n *node) Ready() <-chan Ready { return n.readyc }
-
-func (n *node) Advance() {
-	select {
-	case n.advancec <- struct{}{}:
-	case <-n.done:
-	}
-}
-
-func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
-	var cs pb.ConfState
-	select {
-	case n.confc <- cc:
-	case <-n.done:
-	}
-	select {
-	case cs = <-n.confstatec:
-	case <-n.done:
-	}
-	return &cs
-}
-
-func (n *node) Status() Status {
-	c := make(chan Status)
-	select {
-	case n.status <- c:
-		return <-c
-	case <-n.done:
-		return Status{}
-	}
-}
-
-func (n *node) ReportUnreachable(id uint64) {
-	select {
-	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
-	case <-n.done:
-	}
-}
-
-func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
-	rej := status == SnapshotFailure
-
-	select {
-	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
-	case <-n.done:
-	}
-}
-
-func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
-	select {
-	// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
-	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
-	case <-n.done:
-	case <-ctx.Done():
-	}
-}
-
-func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
-	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
-}
-
-func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
-	rd := Ready{
-		Entries:          r.raftLog.unstableEntries(),
-		CommittedEntries: r.raftLog.nextEnts(),
-		Messages:         r.msgs,
-	}
-	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
-		rd.SoftState = softSt
-	}
-	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
-		rd.HardState = hardSt
-	}
-	if r.raftLog.unstable.snapshot != nil {
-		rd.Snapshot = *r.raftLog.unstable.snapshot
-	}
-	if len(r.readStates) != 0 {
-		rd.ReadStates = r.readStates
-	}
-	rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries))
-	return rd
-}
-
-// MustSync returns true if the hard state and count of Raft entries indicate
-// that a synchronous write to persistent storage is required.
-func MustSync(st, prevst pb.HardState, entsnum int) bool {
-	// Persistent state on all servers:
-	// (Updated on stable storage before responding to RPCs)
-	// currentTerm
-	// votedFor
-	// log entries[]
-	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
-}
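
The Ready and MustSync documentation above reduces to a fixed per-iteration ordering on the application side. The following is a minimal sketch of one iteration, assuming a MemoryStorage-backed node; the transport send and state-machine apply steps are left as comments because they are application-specific, and the function name handleReady is illustrative.

	package raftexample

	import "github.com/coreos/etcd/raft"

	// handleReady processes a single Ready in the order the Ready comments require.
	func handleReady(n raft.Node, storage *raft.MemoryStorage, rd raft.Ready) {
		// 1. Persist HardState and Entries before any Messages are sent.
		if !raft.IsEmptyHardState(rd.HardState) {
			_ = storage.SetHardState(rd.HardState)
		}
		_ = storage.Append(rd.Entries)
		if rd.MustSync {
			// A durable (WAL-backed) implementation would fsync here before continuing.
		}

		// 2. Apply the snapshot, if any, to storage and to the state machine.
		if !raft.IsEmptySnap(rd.Snapshot) {
			_ = storage.ApplySnapshot(rd.Snapshot)
		}

		// 3. Send rd.Messages over the application transport; any MsgSnap result
		//    must later be reported back with n.ReportSnapshot.

		// 4. Apply rd.CommittedEntries to the state machine, calling
		//    n.ApplyConfChange for EntryConfChange entries.

		// 5. Tell raft this Ready has been fully handled.
		n.Advance()
	}
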
diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go
deleted file mode 100644
index ef3787d..0000000
--- a/vendor/github.com/coreos/etcd/raft/progress.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import "fmt"
-
-const (
-	ProgressStateProbe ProgressStateType = iota
-	ProgressStateReplicate
-	ProgressStateSnapshot
-)
-
-type ProgressStateType uint64
-
-var prstmap = [...]string{
-	"ProgressStateProbe",
-	"ProgressStateReplicate",
-	"ProgressStateSnapshot",
-}
-
-func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
-
-// Progress represents a follower’s progress in the view of the leader. Leader maintains
-// progresses of all followers, and sends entries to the follower based on its progress.
-type Progress struct {
-	Match, Next uint64
-	// State defines how the leader should interact with the follower.
-	//
-	// When in ProgressStateProbe, leader sends at most one replication message
-	// per heartbeat interval. It also probes actual progress of the follower.
-	//
-	// When in ProgressStateReplicate, leader optimistically increases next
-	// to the latest entry sent after sending replication message. This is
-	// an optimized state for fast replicating log entries to the follower.
-	//
-	// When in ProgressStateSnapshot, the leader has already sent out a snapshot
-	// and stops sending any replication messages.
-	State ProgressStateType
-
-	// Paused is used in ProgressStateProbe.
-	// When Paused is true, raft should pause sending replication messages to this peer.
-	Paused bool
-	// PendingSnapshot is used in ProgressStateSnapshot.
-	// If there is a pending snapshot, the pendingSnapshot will be set to the
-	// index of the snapshot. If pendingSnapshot is set, the replication process of
-	// this Progress will be paused. raft will not resend snapshot until the pending one
-	// is reported to be failed.
-	PendingSnapshot uint64
-
-	// RecentActive is true if the progress is recently active. Receiving any messages
-	// from the corresponding follower indicates the progress is active.
-	// RecentActive can be reset to false after an election timeout.
-	RecentActive bool
-
-	// inflights is a sliding window for the inflight messages.
-	// Each inflight message contains one or more log entries.
-	// The max number of entries per message is defined in raft config as MaxSizePerMsg.
-	// Thus inflight effectively limits both the number of inflight messages
-	// and the bandwidth each Progress can use.
-	// When inflights is full, no more message should be sent.
-	// When a leader sends out a message, the index of the last
-	// entry should be added to inflights. The index MUST be added
-	// into inflights in order.
-	// When a leader receives a reply, the previous inflights should
-	// be freed by calling inflights.freeTo with the index of the last
-	// received entry.
-	ins *inflights
-
-	// IsLearner is true if this progress is tracked for a learner.
-	IsLearner bool
-}
-
-func (pr *Progress) resetState(state ProgressStateType) {
-	pr.Paused = false
-	pr.PendingSnapshot = 0
-	pr.State = state
-	pr.ins.reset()
-}
-
-func (pr *Progress) becomeProbe() {
-	// If the original state is ProgressStateSnapshot, progress knows that
-	// the pending snapshot has been sent to this peer successfully, then
-	// probes from pendingSnapshot + 1.
-	if pr.State == ProgressStateSnapshot {
-		pendingSnapshot := pr.PendingSnapshot
-		pr.resetState(ProgressStateProbe)
-		pr.Next = max(pr.Match+1, pendingSnapshot+1)
-	} else {
-		pr.resetState(ProgressStateProbe)
-		pr.Next = pr.Match + 1
-	}
-}
-
-func (pr *Progress) becomeReplicate() {
-	pr.resetState(ProgressStateReplicate)
-	pr.Next = pr.Match + 1
-}
-
-func (pr *Progress) becomeSnapshot(snapshoti uint64) {
-	pr.resetState(ProgressStateSnapshot)
-	pr.PendingSnapshot = snapshoti
-}
-
-// maybeUpdate returns false if the given n index comes from an outdated message.
-// Otherwise it updates the progress and returns true.
-func (pr *Progress) maybeUpdate(n uint64) bool {
-	var updated bool
-	if pr.Match < n {
-		pr.Match = n
-		updated = true
-		pr.resume()
-	}
-	if pr.Next < n+1 {
-		pr.Next = n + 1
-	}
-	return updated
-}
-
-func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
-
-// maybeDecrTo returns false if the given to index comes from an out of order message.
-// Otherwise it decreases the progress next index to min(rejected, last) and returns true.
-func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
-	if pr.State == ProgressStateReplicate {
-		// the rejection must be stale if the progress has matched and "rejected"
-		// is smaller than "match".
-		if rejected <= pr.Match {
-			return false
-		}
-		// directly decrease next to match + 1
-		pr.Next = pr.Match + 1
-		return true
-	}
-
-	// the rejection must be stale if "rejected" does not match next - 1
-	if pr.Next-1 != rejected {
-		return false
-	}
-
-	if pr.Next = min(rejected, last+1); pr.Next < 1 {
-		pr.Next = 1
-	}
-	pr.resume()
-	return true
-}
-
-func (pr *Progress) pause()  { pr.Paused = true }
-func (pr *Progress) resume() { pr.Paused = false }
-
-// IsPaused returns whether sending log entries to this node has been
-// paused. A node may be paused because it has rejected recent
-// MsgApps, is currently waiting for a snapshot, or has reached the
-// MaxInflightMsgs limit.
-func (pr *Progress) IsPaused() bool {
-	switch pr.State {
-	case ProgressStateProbe:
-		return pr.Paused
-	case ProgressStateReplicate:
-		return pr.ins.full()
-	case ProgressStateSnapshot:
-		return true
-	default:
-		panic("unexpected state")
-	}
-}
-
-func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
-
-// needSnapshotAbort returns true if snapshot progress's Match
-// is equal or higher than the pendingSnapshot.
-func (pr *Progress) needSnapshotAbort() bool {
-	return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
-}
-
-func (pr *Progress) String() string {
-	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
-}
-
-type inflights struct {
-	// the starting index in the buffer
-	start int
-	// number of inflights in the buffer
-	count int
-
-	// the size of the buffer
-	size int
-
-	// buffer contains the index of the last entry
-	// inside one message.
-	buffer []uint64
-}
-
-func newInflights(size int) *inflights {
-	return &inflights{
-		size: size,
-	}
-}
-
-// add adds an inflight into inflights
-func (in *inflights) add(inflight uint64) {
-	if in.full() {
-		panic("cannot add into a full inflights")
-	}
-	next := in.start + in.count
-	size := in.size
-	if next >= size {
-		next -= size
-	}
-	if next >= len(in.buffer) {
-		in.growBuf()
-	}
-	in.buffer[next] = inflight
-	in.count++
-}
-
-// grow the inflight buffer by doubling up to inflights.size. We grow on demand
-// instead of preallocating to inflights.size to handle systems which have
-// thousands of Raft groups per process.
-func (in *inflights) growBuf() {
-	newSize := len(in.buffer) * 2
-	if newSize == 0 {
-		newSize = 1
-	} else if newSize > in.size {
-		newSize = in.size
-	}
-	newBuffer := make([]uint64, newSize)
-	copy(newBuffer, in.buffer)
-	in.buffer = newBuffer
-}
-
-// freeTo frees the inflights smaller or equal to the given `to` flight.
-func (in *inflights) freeTo(to uint64) {
-	if in.count == 0 || to < in.buffer[in.start] {
-		// out of the left side of the window
-		return
-	}
-
-	idx := in.start
-	var i int
-	for i = 0; i < in.count; i++ {
-		if to < in.buffer[idx] { // found the first large inflight
-			break
-		}
-
-		// increase index and maybe rotate
-		size := in.size
-		if idx++; idx >= size {
-			idx -= size
-		}
-	}
-	// free i inflights and set new start index
-	in.count -= i
-	in.start = idx
-	if in.count == 0 {
-		// inflights is empty, reset the start index so that we don't grow the
-		// buffer unnecessarily.
-		in.start = 0
-	}
-}
-
-func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
-
-// full returns true if the inflights is full.
-func (in *inflights) full() bool {
-	return in.count == in.size
-}
-
-// reset frees all inflights.
-func (in *inflights) reset() {
-	in.count = 0
-	in.start = 0
-}
diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go
deleted file mode 100644
index 22ff138..0000000
--- a/vendor/github.com/coreos/etcd/raft/raft.go
+++ /dev/null
@@ -1,1407 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"math"
-	"math/rand"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// None is a placeholder node ID used when there is no leader.
-const None uint64 = 0
-const noLimit = math.MaxUint64
-
-// Possible values for StateType.
-const (
-	StateFollower StateType = iota
-	StateCandidate
-	StateLeader
-	StatePreCandidate
-	numStates
-)
-
-type ReadOnlyOption int
-
-const (
-	// ReadOnlySafe guarantees the linearizability of the read only request by
-	// communicating with the quorum. It is the default and suggested option.
-	ReadOnlySafe ReadOnlyOption = iota
-	// ReadOnlyLeaseBased ensures linearizability of the read only request by
-	// relying on the leader lease. It can be affected by clock drift.
-	// If the clock drift is unbounded, leader might keep the lease longer than it
-	// should (clock can move backward/pause without any bound). ReadIndex is not safe
-	// in that case.
-	ReadOnlyLeaseBased
-)
-
-// Possible values for CampaignType
-const (
-	// campaignPreElection represents the first phase of a normal election when
-	// Config.PreVote is true.
-	campaignPreElection CampaignType = "CampaignPreElection"
-	// campaignElection represents a normal (time-based) election (the second phase
-	// of the election when Config.PreVote is true).
-	campaignElection CampaignType = "CampaignElection"
-	// campaignTransfer represents the type of leader transfer
-	campaignTransfer CampaignType = "CampaignTransfer"
-)
-
-// lockedRand is a small wrapper around rand.Rand to provide
-// synchronization. Only the methods needed by the code are exposed
-// (e.g. Intn).
-type lockedRand struct {
-	mu   sync.Mutex
-	rand *rand.Rand
-}
-
-func (r *lockedRand) Intn(n int) int {
-	r.mu.Lock()
-	v := r.rand.Intn(n)
-	r.mu.Unlock()
-	return v
-}
-
-var globalRand = &lockedRand{
-	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
-}
-
-// CampaignType represents the type of campaigning
-// the reason we use a string type instead of uint64
-// is that it is simpler to compare and to fill in raft entries
-type CampaignType string
-
-// StateType represents the role of a node in a cluster.
-type StateType uint64
-
-var stmap = [...]string{
-	"StateFollower",
-	"StateCandidate",
-	"StateLeader",
-	"StatePreCandidate",
-}
-
-func (st StateType) String() string {
-	return stmap[uint64(st)]
-}
-
-// Config contains the parameters to start a raft.
-type Config struct {
-	// ID is the identity of the local raft. ID cannot be 0.
-	ID uint64
-
-	// peers contains the IDs of all nodes (including self) in the raft cluster. It
-	// should only be set when starting a new raft cluster. Restarting raft from
-	// previous configuration will panic if peers is set. peer is private and only
-	// used for testing right now.
-	peers []uint64
-
-	// learners contains the IDs of all learner nodes (including self if the local node is a learner) in the raft cluster.
-	// A learner only receives entries from the leader node. It does not vote or promote itself.
-	learners []uint64
-
-	// ElectionTick is the number of Node.Tick invocations that must pass between
-	// elections. That is, if a follower does not receive any message from the
-	// leader of current term before ElectionTick has elapsed, it will become
-	// candidate and start an election. ElectionTick must be greater than
-	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
-	// unnecessary leader switching.
-	ElectionTick int
-	// HeartbeatTick is the number of Node.Tick invocations that must pass between
-	// heartbeats. That is, a leader sends heartbeat messages to maintain its
-	// leadership every HeartbeatTick ticks.
-	HeartbeatTick int
-
-	// Storage is the storage for raft. raft generates entries and states to be
-	// stored in storage. raft reads the persisted entries and states out of
-	// Storage when it needs. raft reads out the previous state and configuration
-	// out of storage when restarting.
-	Storage Storage
-	// Applied is the last applied index. It should only be set when restarting
-	// raft. raft will not return entries to the application smaller or equal to
-	// Applied. If Applied is unset when restarting, raft might return previous
-	// applied entries. This is a very application dependent configuration.
-	Applied uint64
-
-	// MaxSizePerMsg limits the max size of each append message. A smaller value
-	// lowers the raft recovery cost (initial probing and messages lost during normal
-	// operation). On the other hand, it might affect the throughput during normal
-	// replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
-	// message.
-	MaxSizePerMsg uint64
-	// MaxInflightMsgs limits the max number of in-flight append messages during
-	// optimistic replication phase. The application transportation layer usually
-	// has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
-	// overflowing that sending buffer. TODO (xiangli): feedback to application to
-	// limit the proposal rate?
-	MaxInflightMsgs int
-
-	// CheckQuorum specifies if the leader should check quorum activity. Leader
-	// steps down when quorum is not active for an electionTimeout.
-	CheckQuorum bool
-
-	// PreVote enables the Pre-Vote algorithm described in raft thesis section
-	// 9.6. This prevents disruption when a node that has been partitioned away
-	// rejoins the cluster.
-	PreVote bool
-
-	// ReadOnlyOption specifies how the read only request is processed.
-	//
-	// ReadOnlySafe guarantees the linearizability of the read only request by
-	// communicating with the quorum. It is the default and suggested option.
-	//
-	// ReadOnlyLeaseBased ensures linearizability of the read only request by
-	// relying on the leader lease. It can be affected by clock drift.
-	// If the clock drift is unbounded, leader might keep the lease longer than it
-	// should (clock can move backward/pause without any bound). ReadIndex is not safe
-	// in that case.
-	// CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
-	ReadOnlyOption ReadOnlyOption
-
-	// Logger is the logger used for raft log. For a multinode setup which can
-	// host multiple raft groups, each raft group can have its own logger.
-	Logger Logger
-
-	// DisableProposalForwarding set to true means that followers will drop
-	// proposals, rather than forwarding them to the leader. One use case for
-	// this feature would be in a situation where the Raft leader is used to
-	// compute the data of a proposal, for example, adding a timestamp from a
-	// hybrid logical clock to data in a monotonically increasing way. Forwarding
-	// should be disabled to prevent a follower with an inaccurate hybrid
-	// logical clock from assigning the timestamp and then forwarding the data
-	// to the leader.
-	DisableProposalForwarding bool
-}
-
-func (c *Config) validate() error {
-	if c.ID == None {
-		return errors.New("cannot use none as id")
-	}
-
-	if c.HeartbeatTick <= 0 {
-		return errors.New("heartbeat tick must be greater than 0")
-	}
-
-	if c.ElectionTick <= c.HeartbeatTick {
-		return errors.New("election tick must be greater than heartbeat tick")
-	}
-
-	if c.Storage == nil {
-		return errors.New("storage cannot be nil")
-	}
-
-	if c.MaxInflightMsgs <= 0 {
-		return errors.New("max inflight messages must be greater than 0")
-	}
-
-	if c.Logger == nil {
-		c.Logger = raftLogger
-	}
-
-	if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
-		return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
-	}
-
-	return nil
-}
-
-type raft struct {
-	id uint64
-
-	Term uint64
-	Vote uint64
-
-	readStates []ReadState
-
-	// the log
-	raftLog *raftLog
-
-	maxInflight int
-	maxMsgSize  uint64
-	prs         map[uint64]*Progress
-	learnerPrs  map[uint64]*Progress
-
-	state StateType
-
-	// isLearner is true if the local raft node is a learner.
-	isLearner bool
-
-	votes map[uint64]bool
-
-	msgs []pb.Message
-
-	// the leader id
-	lead uint64
-	// leadTransferee is id of the leader transfer target when its value is not zero.
-	// Follow the procedure defined in raft thesis 3.10.
-	leadTransferee uint64
-	// A new configuration is ignored if there is an unapplied configuration.
-	pendingConf bool
-
-	readOnly *readOnly
-
-	// number of ticks since it reached last electionTimeout when it is leader
-	// or candidate.
-	// number of ticks since it reached last electionTimeout or received a
-	// valid message from current leader when it is a follower.
-	electionElapsed int
-
-	// number of ticks since it reached last heartbeatTimeout.
-	// only leader keeps heartbeatElapsed.
-	heartbeatElapsed int
-
-	checkQuorum bool
-	preVote     bool
-
-	heartbeatTimeout int
-	electionTimeout  int
-	// randomizedElectionTimeout is a random number between
-	// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
-	// when raft changes its state to follower or candidate.
-	randomizedElectionTimeout int
-	disableProposalForwarding bool
-
-	tick func()
-	step stepFunc
-
-	logger Logger
-}
-
-func newRaft(c *Config) *raft {
-	if err := c.validate(); err != nil {
-		panic(err.Error())
-	}
-	raftlog := newLog(c.Storage, c.Logger)
-	hs, cs, err := c.Storage.InitialState()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	peers := c.peers
-	learners := c.learners
-	if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
-		if len(peers) > 0 || len(learners) > 0 {
-			// TODO(bdarnell): the peers argument is always nil except in
-			// tests; the argument should be removed and these tests should be
-			// updated to specify their nodes through a snapshot.
-			panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
-		}
-		peers = cs.Nodes
-		learners = cs.Learners
-	}
-	r := &raft{
-		id:                        c.ID,
-		lead:                      None,
-		isLearner:                 false,
-		raftLog:                   raftlog,
-		maxMsgSize:                c.MaxSizePerMsg,
-		maxInflight:               c.MaxInflightMsgs,
-		prs:                       make(map[uint64]*Progress),
-		learnerPrs:                make(map[uint64]*Progress),
-		electionTimeout:           c.ElectionTick,
-		heartbeatTimeout:          c.HeartbeatTick,
-		logger:                    c.Logger,
-		checkQuorum:               c.CheckQuorum,
-		preVote:                   c.PreVote,
-		readOnly:                  newReadOnly(c.ReadOnlyOption),
-		disableProposalForwarding: c.DisableProposalForwarding,
-	}
-	for _, p := range peers {
-		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
-	}
-	for _, p := range learners {
-		if _, ok := r.prs[p]; ok {
-			panic(fmt.Sprintf("node %x is in both learner and peer list", p))
-		}
-		r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
-		if r.id == p {
-			r.isLearner = true
-		}
-	}
-
-	if !isHardStateEqual(hs, emptyState) {
-		r.loadState(hs)
-	}
-	if c.Applied > 0 {
-		raftlog.appliedTo(c.Applied)
-	}
-	r.becomeFollower(r.Term, None)
-
-	var nodesStrs []string
-	for _, n := range r.nodes() {
-		nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
-	}
-
-	r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
-		r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
-	return r
-}
-
-func (r *raft) hasLeader() bool { return r.lead != None }
-
-func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
-
-func (r *raft) hardState() pb.HardState {
-	return pb.HardState{
-		Term:   r.Term,
-		Vote:   r.Vote,
-		Commit: r.raftLog.committed,
-	}
-}
-
-func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
-
-func (r *raft) nodes() []uint64 {
-	nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs))
-	for id := range r.prs {
-		nodes = append(nodes, id)
-	}
-	for id := range r.learnerPrs {
-		nodes = append(nodes, id)
-	}
-	sort.Sort(uint64Slice(nodes))
-	return nodes
-}
-
-// send persists state to stable storage and then sends to its mailbox.
-func (r *raft) send(m pb.Message) {
-	m.From = r.id
-	if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
-		if m.Term == 0 {
-			// All {pre-,}campaign messages need to have the term set when
-			// sending.
-			// - MsgVote: m.Term is the term the node is campaigning for,
-			//   non-zero as we increment the term when campaigning.
-			// - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
-			//   granted, non-zero for the same reason MsgVote is
-			// - MsgPreVote: m.Term is the term the node will campaign for,
-			//   non-zero as we use m.Term to indicate the next term we'll be
-			//   campaigning for
-			// - MsgPreVoteResp: m.Term is the term received in the original
-			//   MsgPreVote if the pre-vote was granted, non-zero for the
-			//   same reasons MsgPreVote is
-			panic(fmt.Sprintf("term should be set when sending %s", m.Type))
-		}
-	} else {
-		if m.Term != 0 {
-			panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
-		}
-		// do not attach term to MsgProp, MsgReadIndex
-		// proposals are a way to forward to the leader and
-		// should be treated as local messages.
-		// MsgReadIndex is also forwarded to leader.
-		if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
-			m.Term = r.Term
-		}
-	}
-	r.msgs = append(r.msgs, m)
-}
-
-func (r *raft) getProgress(id uint64) *Progress {
-	if pr, ok := r.prs[id]; ok {
-		return pr
-	}
-
-	return r.learnerPrs[id]
-}
-
-// sendAppend sends an append RPC with entries to the given peer.
-func (r *raft) sendAppend(to uint64) {
-	pr := r.getProgress(to)
-	if pr.IsPaused() {
-		return
-	}
-	m := pb.Message{}
-	m.To = to
-
-	term, errt := r.raftLog.term(pr.Next - 1)
-	ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
-
-	if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
-		if !pr.RecentActive {
-			r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
-			return
-		}
-
-		m.Type = pb.MsgSnap
-		snapshot, err := r.raftLog.snapshot()
-		if err != nil {
-			if err == ErrSnapshotTemporarilyUnavailable {
-				r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
-				return
-			}
-			panic(err) // TODO(bdarnell)
-		}
-		if IsEmptySnap(snapshot) {
-			panic("need non-empty snapshot")
-		}
-		m.Snapshot = snapshot
-		sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
-		r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
-			r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
-		pr.becomeSnapshot(sindex)
-		r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
-	} else {
-		m.Type = pb.MsgApp
-		m.Index = pr.Next - 1
-		m.LogTerm = term
-		m.Entries = ents
-		m.Commit = r.raftLog.committed
-		if n := len(m.Entries); n != 0 {
-			switch pr.State {
-			// optimistically increase the next when in ProgressStateReplicate
-			case ProgressStateReplicate:
-				last := m.Entries[n-1].Index
-				pr.optimisticUpdate(last)
-				pr.ins.add(last)
-			case ProgressStateProbe:
-				pr.pause()
-			default:
-				r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
-			}
-		}
-	}
-	r.send(m)
-}
-
-// sendHeartbeat sends a heartbeat message (MsgHeartbeat) to the given peer.
-func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
-	// Attach the commit as min(to.matched, r.committed).
-	// When the leader sends out a heartbeat message,
-	// the receiver (follower) might not be matched with the leader
-	// or it might not have all of the committed entries.
-	// The leader MUST NOT forward the follower's commit to
-	// an unmatched index.
-	commit := min(r.getProgress(to).Match, r.raftLog.committed)
-	m := pb.Message{
-		To:      to,
-		Type:    pb.MsgHeartbeat,
-		Commit:  commit,
-		Context: ctx,
-	}
-
-	r.send(m)
-}
-
-func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
-	for id, pr := range r.prs {
-		f(id, pr)
-	}
-
-	for id, pr := range r.learnerPrs {
-		f(id, pr)
-	}
-}
-
-// bcastAppend sends append RPCs with entries to all peers that are not up-to-date
-// according to the progress recorded in r.prs.
-func (r *raft) bcastAppend() {
-	r.forEachProgress(func(id uint64, _ *Progress) {
-		if id == r.id {
-			return
-		}
-
-		r.sendAppend(id)
-	})
-}
-
-// bcastHeartbeat sends heartbeat RPCs, without entries, to all the peers.
-func (r *raft) bcastHeartbeat() {
-	lastCtx := r.readOnly.lastPendingRequestCtx()
-	if len(lastCtx) == 0 {
-		r.bcastHeartbeatWithCtx(nil)
-	} else {
-		r.bcastHeartbeatWithCtx([]byte(lastCtx))
-	}
-}
-
-func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
-	r.forEachProgress(func(id uint64, _ *Progress) {
-		if id == r.id {
-			return
-		}
-		r.sendHeartbeat(id, ctx)
-	})
-}
-
-// maybeCommit attempts to advance the commit index. Returns true if
-// the commit index changed (in which case the caller should call
-// r.bcastAppend).
-func (r *raft) maybeCommit() bool {
-	// TODO(bmizerany): optimize.. Currently naive
-	mis := make(uint64Slice, 0, len(r.prs))
-	for _, p := range r.prs {
-		mis = append(mis, p.Match)
-	}
-	sort.Sort(sort.Reverse(mis))
-	mci := mis[r.quorum()-1]
-	return r.raftLog.maybeCommit(mci, r.Term)
-}
-
-func (r *raft) reset(term uint64) {
-	if r.Term != term {
-		r.Term = term
-		r.Vote = None
-	}
-	r.lead = None
-
-	r.electionElapsed = 0
-	r.heartbeatElapsed = 0
-	r.resetRandomizedElectionTimeout()
-
-	r.abortLeaderTransfer()
-
-	r.votes = make(map[uint64]bool)
-	r.forEachProgress(func(id uint64, pr *Progress) {
-		*pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
-		if id == r.id {
-			pr.Match = r.raftLog.lastIndex()
-		}
-	})
-
-	r.pendingConf = false
-	r.readOnly = newReadOnly(r.readOnly.option)
-}
-
-func (r *raft) appendEntry(es ...pb.Entry) {
-	li := r.raftLog.lastIndex()
-	for i := range es {
-		es[i].Term = r.Term
-		es[i].Index = li + 1 + uint64(i)
-	}
-	r.raftLog.append(es...)
-	r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex())
-	// Regardless of maybeCommit's return, our caller will call bcastAppend.
-	r.maybeCommit()
-}
-
-// tickElection is run by followers and candidates after r.electionTimeout.
-func (r *raft) tickElection() {
-	r.electionElapsed++
-
-	if r.promotable() && r.pastElectionTimeout() {
-		r.electionElapsed = 0
-		r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
-	}
-}
-
-// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
-func (r *raft) tickHeartbeat() {
-	r.heartbeatElapsed++
-	r.electionElapsed++
-
-	if r.electionElapsed >= r.electionTimeout {
-		r.electionElapsed = 0
-		if r.checkQuorum {
-			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
-		}
-		// If current leader cannot transfer leadership in electionTimeout, it becomes leader again.
-		if r.state == StateLeader && r.leadTransferee != None {
-			r.abortLeaderTransfer()
-		}
-	}
-
-	if r.state != StateLeader {
-		return
-	}
-
-	if r.heartbeatElapsed >= r.heartbeatTimeout {
-		r.heartbeatElapsed = 0
-		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
-	}
-}
-
-func (r *raft) becomeFollower(term uint64, lead uint64) {
-	r.step = stepFollower
-	r.reset(term)
-	r.tick = r.tickElection
-	r.lead = lead
-	r.state = StateFollower
-	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeCandidate() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateLeader {
-		panic("invalid transition [leader -> candidate]")
-	}
-	r.step = stepCandidate
-	r.reset(r.Term + 1)
-	r.tick = r.tickElection
-	r.Vote = r.id
-	r.state = StateCandidate
-	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomePreCandidate() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateLeader {
-		panic("invalid transition [leader -> pre-candidate]")
-	}
-	// Becoming a pre-candidate changes our step functions and state,
-	// but doesn't change anything else. In particular it does not increase
-	// r.Term or change r.Vote.
-	r.step = stepCandidate
-	r.votes = make(map[uint64]bool)
-	r.tick = r.tickElection
-	r.lead = None
-	r.state = StatePreCandidate
-	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeLeader() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateFollower {
-		panic("invalid transition [follower -> leader]")
-	}
-	r.step = stepLeader
-	r.reset(r.Term)
-	r.tick = r.tickHeartbeat
-	r.lead = r.id
-	r.state = StateLeader
-	ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
-	if err != nil {
-		r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
-	}
-
-	nconf := numOfPendingConf(ents)
-	if nconf > 1 {
-		panic("unexpected multiple uncommitted config entry")
-	}
-	if nconf == 1 {
-		r.pendingConf = true
-	}
-
-	r.appendEntry(pb.Entry{Data: nil})
-	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
-}
-
-func (r *raft) campaign(t CampaignType) {
-	var term uint64
-	var voteMsg pb.MessageType
-	if t == campaignPreElection {
-		r.becomePreCandidate()
-		voteMsg = pb.MsgPreVote
-		// PreVote RPCs are sent for the next term before we've incremented r.Term.
-		term = r.Term + 1
-	} else {
-		r.becomeCandidate()
-		voteMsg = pb.MsgVote
-		term = r.Term
-	}
-	if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
-		// We won the election after voting for ourselves (which must mean that
-		// this is a single-node cluster). Advance to the next state.
-		if t == campaignPreElection {
-			r.campaign(campaignElection)
-		} else {
-			r.becomeLeader()
-		}
-		return
-	}
-	for id := range r.prs {
-		if id == r.id {
-			continue
-		}
-		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
-			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
-
-		var ctx []byte
-		if t == campaignTransfer {
-			ctx = []byte(t)
-		}
-		r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
-	}
-}
-
-func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
-	if v {
-		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
-	} else {
-		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
-	}
-	if _, ok := r.votes[id]; !ok {
-		r.votes[id] = v
-	}
-	for _, vv := range r.votes {
-		if vv {
-			granted++
-		}
-	}
-	return granted
-}
-
-func (r *raft) Step(m pb.Message) error {
-	// Handle the message term, which may result in our stepping down to a follower.
-	switch {
-	case m.Term == 0:
-		// local message
-	case m.Term > r.Term:
-		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
-			force := bytes.Equal(m.Context, []byte(campaignTransfer))
-			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
-			if !force && inLease {
-				// If a server receives a RequestVote request within the minimum election timeout
-				// of hearing from a current leader, it does not update its term or grant its vote
-				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
-					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
-				return nil
-			}
-		}
-		switch {
-		case m.Type == pb.MsgPreVote:
-			// Never change our term in response to a PreVote
-		case m.Type == pb.MsgPreVoteResp && !m.Reject:
-			// We send pre-vote requests with a term in our future. If the
-			// pre-vote is granted, we will increment our term when we get a
-			// quorum. If it is not, the term comes from the node that
-			// rejected our vote so we should become a follower at the new
-			// term.
-		default:
-			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
-				r.id, r.Term, m.Type, m.From, m.Term)
-			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
-				r.becomeFollower(m.Term, m.From)
-			} else {
-				r.becomeFollower(m.Term, None)
-			}
-		}
-
-	case m.Term < r.Term:
-		if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
-			// We have received messages from a leader at a lower term. It is possible
-			// that these messages were simply delayed in the network, but this could
-			// also mean that this node has advanced its term number during a network
-			// partition, and it is now unable to either win an election or to rejoin
-			// the majority on the old term. If checkQuorum is false, this will be
-			// handled by incrementing term numbers in response to MsgVote with a
-			// higher term, but if checkQuorum is true we may not advance the term on
-			// MsgVote and must generate other messages to advance the term. The net
-			// result of these two features is to minimize the disruption caused by
-			// nodes that have been removed from the cluster's configuration: a
-			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
-			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
-			// disruptive term increases
-			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
-		} else {
-			// ignore other cases
-			r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
-				r.id, r.Term, m.Type, m.From, m.Term)
-		}
-		return nil
-	}
-
-	switch m.Type {
-	case pb.MsgHup:
-		if r.state != StateLeader {
-			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
-			if err != nil {
-				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
-			}
-			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
-				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
-				return nil
-			}
-
-			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
-			if r.preVote {
-				r.campaign(campaignPreElection)
-			} else {
-				r.campaign(campaignElection)
-			}
-		} else {
-			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
-		}
-
-	case pb.MsgVote, pb.MsgPreVote:
-		if r.isLearner {
-			// TODO: learner may need to vote, in case of node down when confchange.
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			return nil
-		}
-		// The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
-		// always equal r.Term.
-		if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			// When responding to Msg{Pre,}Vote messages we include the term
-			// from the message, not the local term. To see why consider the
-			// case where a single node was previously partitioned away and
-			// its local term is now out of date. If we include the local term
-			// (recall that for pre-votes we don't update the local term), the
-			// (pre-)campaigning node on the other end will proceed to ignore
-			// the message (it ignores all out of date messages).
-			// The term in the original message and current local term are the
-			// same in the case of regular votes, but different for pre-votes.
-			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
-			if m.Type == pb.MsgVote {
-				// Only record real votes.
-				r.electionElapsed = 0
-				r.Vote = m.From
-			}
-		} else {
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
-		}
-
-	default:
-		r.step(r, m)
-	}
-	return nil
-}
-
-type stepFunc func(r *raft, m pb.Message)
-
-func stepLeader(r *raft, m pb.Message) {
-	// These message types do not require any progress for m.From.
-	switch m.Type {
-	case pb.MsgBeat:
-		r.bcastHeartbeat()
-		return
-	case pb.MsgCheckQuorum:
-		if !r.checkQuorumActive() {
-			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
-			r.becomeFollower(r.Term, None)
-		}
-		return
-	case pb.MsgProp:
-		if len(m.Entries) == 0 {
-			r.logger.Panicf("%x stepped empty MsgProp", r.id)
-		}
-		if _, ok := r.prs[r.id]; !ok {
-			// If we are not currently a member of the range (i.e. this node
-			// was removed from the configuration while serving as leader),
-			// drop any new proposals.
-			return
-		}
-		if r.leadTransferee != None {
-			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
-			return
-		}
-
-		for i, e := range m.Entries {
-			if e.Type == pb.EntryConfChange {
-				if r.pendingConf {
-					r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
-					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
-				}
-				r.pendingConf = true
-			}
-		}
-		r.appendEntry(m.Entries...)
-		r.bcastAppend()
-		return
-	case pb.MsgReadIndex:
-		if r.quorum() > 1 {
-			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
-				// Reject read only request when this leader has not committed any log entry at its term.
-				return
-			}
-
-			// thinking: use an internally defined context instead of the user-given context.
-			// We can express this in terms of the term and index instead of a user-supplied value.
-			// This would allow multiple reads to piggyback on the same message.
-			switch r.readOnly.option {
-			case ReadOnlySafe:
-				r.readOnly.addRequest(r.raftLog.committed, m)
-				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
-			case ReadOnlyLeaseBased:
-				ri := r.raftLog.committed
-				if m.From == None || m.From == r.id { // from local member
-					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
-				} else {
-					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
-				}
-			}
-		} else {
-			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
-		}
-
-		return
-	}
-
-	// All other message types require a progress for m.From (pr).
-	pr := r.getProgress(m.From)
-	if pr == nil {
-		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
-		return
-	}
-	switch m.Type {
-	case pb.MsgAppResp:
-		pr.RecentActive = true
-
-		if m.Reject {
-			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
-				r.id, m.RejectHint, m.From, m.Index)
-			if pr.maybeDecrTo(m.Index, m.RejectHint) {
-				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
-				if pr.State == ProgressStateReplicate {
-					pr.becomeProbe()
-				}
-				r.sendAppend(m.From)
-			}
-		} else {
-			oldPaused := pr.IsPaused()
-			if pr.maybeUpdate(m.Index) {
-				switch {
-				case pr.State == ProgressStateProbe:
-					pr.becomeReplicate()
-				case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
-					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-					pr.becomeProbe()
-				case pr.State == ProgressStateReplicate:
-					pr.ins.freeTo(m.Index)
-				}
-
-				if r.maybeCommit() {
-					r.bcastAppend()
-				} else if oldPaused {
-					// update() reset the wait state on this node. If we had delayed sending
-					// an update before, send it now.
-					r.sendAppend(m.From)
-				}
-				// Transfer leadership is in progress.
-				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
-					r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
-					r.sendTimeoutNow(m.From)
-				}
-			}
-		}
-	case pb.MsgHeartbeatResp:
-		pr.RecentActive = true
-		pr.resume()
-
-		// free one slot for the full inflights window to allow progress.
-		if pr.State == ProgressStateReplicate && pr.ins.full() {
-			pr.ins.freeFirstOne()
-		}
-		if pr.Match < r.raftLog.lastIndex() {
-			r.sendAppend(m.From)
-		}
-
-		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
-			return
-		}
-
-		ackCount := r.readOnly.recvAck(m)
-		if ackCount < r.quorum() {
-			return
-		}
-
-		rss := r.readOnly.advance(m)
-		for _, rs := range rss {
-			req := rs.req
-			if req.From == None || req.From == r.id { // from local member
-				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
-			} else {
-				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
-			}
-		}
-	case pb.MsgSnapStatus:
-		if pr.State != ProgressStateSnapshot {
-			return
-		}
-		if !m.Reject {
-			pr.becomeProbe()
-			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-		} else {
-			pr.snapshotFailure()
-			pr.becomeProbe()
-			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-		}
-		// If the snapshot finished, wait for the msgAppResp from the remote node before
-		// sending out the next msgApp.
-		// If the snapshot failed, wait for a heartbeat interval before the next try.
-		pr.pause()
-	case pb.MsgUnreachable:
-		// During optimistic replication, if the remote becomes unreachable,
-		// there is a high probability that a MsgApp is lost.
-		if pr.State == ProgressStateReplicate {
-			pr.becomeProbe()
-		}
-		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
-	case pb.MsgTransferLeader:
-		if pr.IsLearner {
-			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
-			return
-		}
-		leadTransferee := m.From
-		lastLeadTransferee := r.leadTransferee
-		if lastLeadTransferee != None {
-			if lastLeadTransferee == leadTransferee {
-				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
-					r.id, r.Term, leadTransferee, leadTransferee)
-				return
-			}
-			r.abortLeaderTransfer()
-			r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
-		}
-		if leadTransferee == r.id {
-			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
-			return
-		}
-		// Transfer leadership to third party.
-		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
-		// Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
-		r.electionElapsed = 0
-		r.leadTransferee = leadTransferee
-		if pr.Match == r.raftLog.lastIndex() {
-			r.sendTimeoutNow(leadTransferee)
-			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
-		} else {
-			r.sendAppend(leadTransferee)
-		}
-	}
-}
-
-// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
-// whether they respond to MsgVoteResp or MsgPreVoteResp.
-func stepCandidate(r *raft, m pb.Message) {
-	// Only handle vote responses corresponding to our candidacy (while in
-	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
-	// our pre-candidate state).
-	var myVoteRespType pb.MessageType
-	if r.state == StatePreCandidate {
-		myVoteRespType = pb.MsgPreVoteResp
-	} else {
-		myVoteRespType = pb.MsgVoteResp
-	}
-	switch m.Type {
-	case pb.MsgProp:
-		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
-		return
-	case pb.MsgApp:
-		r.becomeFollower(r.Term, m.From)
-		r.handleAppendEntries(m)
-	case pb.MsgHeartbeat:
-		r.becomeFollower(r.Term, m.From)
-		r.handleHeartbeat(m)
-	case pb.MsgSnap:
-		r.becomeFollower(m.Term, m.From)
-		r.handleSnapshot(m)
-	case myVoteRespType:
-		gr := r.poll(m.From, m.Type, !m.Reject)
-		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
-		switch r.quorum() {
-		case gr:
-			if r.state == StatePreCandidate {
-				r.campaign(campaignElection)
-			} else {
-				r.becomeLeader()
-				r.bcastAppend()
-			}
-		case len(r.votes) - gr:
-			r.becomeFollower(r.Term, None)
-		}
-	case pb.MsgTimeoutNow:
-		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
-	}
-}
-
-func stepFollower(r *raft, m pb.Message) {
-	switch m.Type {
-	case pb.MsgProp:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
-			return
-		} else if r.disableProposalForwarding {
-			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgApp:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleAppendEntries(m)
-	case pb.MsgHeartbeat:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleHeartbeat(m)
-	case pb.MsgSnap:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleSnapshot(m)
-	case pb.MsgTransferLeader:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgTimeoutNow:
-		if r.promotable() {
-			r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
-			// Leadership transfers never use pre-vote even if r.preVote is true; we
-			// know we are not recovering from a partition so there is no need for the
-			// extra round trip.
-			r.campaign(campaignTransfer)
-		} else {
-			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
-		}
-	case pb.MsgReadIndex:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgReadIndexResp:
-		if len(m.Entries) != 1 {
-			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
-			return
-		}
-		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
-	}
-}
-
-func (r *raft) handleAppendEntries(m pb.Message) {
-	if m.Index < r.raftLog.committed {
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
-		return
-	}
-
-	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
-	} else {
-		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
-			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
-	}
-}
-
-func (r *raft) handleHeartbeat(m pb.Message) {
-	r.raftLog.commitTo(m.Commit)
-	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
-}
-
-func (r *raft) handleSnapshot(m pb.Message) {
-	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
-	if r.restore(m.Snapshot) {
-		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, sindex, sterm)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
-	} else {
-		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, sindex, sterm)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
-	}
-}
-
-// restore recovers the state machine from a snapshot. It restores the log and the
-// configuration of the state machine.
-func (r *raft) restore(s pb.Snapshot) bool {
-	if s.Metadata.Index <= r.raftLog.committed {
-		return false
-	}
-	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
-		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
-		r.raftLog.commitTo(s.Metadata.Index)
-		return false
-	}
-
-	// A normal peer can't become a learner.
-	if !r.isLearner {
-		for _, id := range s.Metadata.ConfState.Learners {
-			if id == r.id {
-				r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
-				return false
-			}
-		}
-	}
-
-	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
-		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
-
-	r.raftLog.restore(s)
-	r.prs = make(map[uint64]*Progress)
-	r.learnerPrs = make(map[uint64]*Progress)
-	r.restoreNode(s.Metadata.ConfState.Nodes, false)
-	r.restoreNode(s.Metadata.ConfState.Learners, true)
-	return true
-}
-
-func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
-	for _, n := range nodes {
-		match, next := uint64(0), r.raftLog.lastIndex()+1
-		if n == r.id {
-			match = next - 1
-			r.isLearner = isLearner
-		}
-		r.setProgress(n, match, next, isLearner)
-		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
-	}
-}
-
-// promotable indicates whether the state machine can be promoted to leader,
-// which is true when its own id is in the progress list.
-func (r *raft) promotable() bool {
-	_, ok := r.prs[r.id]
-	return ok
-}
-
-func (r *raft) addNode(id uint64) {
-	r.addNodeOrLearnerNode(id, false)
-}
-
-func (r *raft) addLearner(id uint64) {
-	r.addNodeOrLearnerNode(id, true)
-}
-
-func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
-	r.pendingConf = false
-	pr := r.getProgress(id)
-	if pr == nil {
-		r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
-	} else {
-		if isLearner && !pr.IsLearner {
-			// can only change Learner to Voter
-			r.logger.Infof("%x ignored addLeaner: do not support changing %x from raft peer to learner.", r.id, id)
-			return
-		}
-
-		if isLearner == pr.IsLearner {
-			// Ignore any redundant addNode calls (which can happen because the
-			// initial bootstrapping entries are applied twice).
-			return
-		}
-
-		// change Learner to Voter, use origin Learner progress
-		delete(r.learnerPrs, id)
-		pr.IsLearner = false
-		r.prs[id] = pr
-	}
-
-	if r.id == id {
-		r.isLearner = isLearner
-	}
-
-	// When a node is first added, we should mark it as recently active.
-	// Otherwise, CheckQuorum may cause us to step down if it is invoked
-	// before the added node has a chance to communicate with us.
-	pr = r.getProgress(id)
-	pr.RecentActive = true
-}
-
-func (r *raft) removeNode(id uint64) {
-	r.delProgress(id)
-	r.pendingConf = false
-
-	// do not try to commit or abort transferring if there are no nodes in the cluster.
-	if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
-		return
-	}
-
-	// The quorum size is now smaller, so see if any pending entries can
-	// be committed.
-	if r.maybeCommit() {
-		r.bcastAppend()
-	}
-	// If the removed node is the leadTransferee, then abort the leadership transferring.
-	if r.state == StateLeader && r.leadTransferee == id {
-		r.abortLeaderTransfer()
-	}
-}
-
-func (r *raft) resetPendingConf() { r.pendingConf = false }
-
-func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
-	if !isLearner {
-		delete(r.learnerPrs, id)
-		r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
-		return
-	}
-
-	if _, ok := r.prs[id]; ok {
-		panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
-	}
-	r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
-}
-
-func (r *raft) delProgress(id uint64) {
-	delete(r.prs, id)
-	delete(r.learnerPrs, id)
-}
-
-func (r *raft) loadState(state pb.HardState) {
-	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
-		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
-	}
-	r.raftLog.committed = state.Commit
-	r.Term = state.Term
-	r.Vote = state.Vote
-}
-
-// pastElectionTimeout returns true iff r.electionElapsed is greater
-// than or equal to the randomized election timeout in
-// [electiontimeout, 2 * electiontimeout - 1].
-func (r *raft) pastElectionTimeout() bool {
-	return r.electionElapsed >= r.randomizedElectionTimeout
-}
-
-func (r *raft) resetRandomizedElectionTimeout() {
-	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
-}
-
-// checkQuorumActive returns true if the quorum is active from
-// the view of the local raft state machine. Otherwise, it returns
-// false.
-// checkQuorumActive also resets all RecentActive to false.
-func (r *raft) checkQuorumActive() bool {
-	var act int
-
-	r.forEachProgress(func(id uint64, pr *Progress) {
-		if id == r.id { // self is always active
-			act++
-			return
-		}
-
-		if pr.RecentActive && !pr.IsLearner {
-			act++
-		}
-
-		pr.RecentActive = false
-	})
-
-	return act >= r.quorum()
-}
-
-func (r *raft) sendTimeoutNow(to uint64) {
-	r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
-}
-
-func (r *raft) abortLeaderTransfer() {
-	r.leadTransferee = None
-}
-
-func numOfPendingConf(ents []pb.Entry) int {
-	n := 0
-	for i := range ents {
-		if ents[i].Type == pb.EntryConfChange {
-			n++
-		}
-	}
-	return n
-}
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
deleted file mode 100644
index 753bd84..0000000
--- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
+++ /dev/null
@@ -1,2365 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: raft.proto
-
-package raftpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type EntryType int32
-
-const (
-	EntryNormal     EntryType = 0
-	EntryConfChange EntryType = 1
-)
-
-var EntryType_name = map[int32]string{
-	0: "EntryNormal",
-	1: "EntryConfChange",
-}
-
-var EntryType_value = map[string]int32{
-	"EntryNormal":     0,
-	"EntryConfChange": 1,
-}
-
-func (x EntryType) Enum() *EntryType {
-	p := new(EntryType)
-	*p = x
-	return p
-}
-
-func (x EntryType) String() string {
-	return proto.EnumName(EntryType_name, int32(x))
-}
-
-func (x *EntryType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
-	if err != nil {
-		return err
-	}
-	*x = EntryType(value)
-	return nil
-}
-
-func (EntryType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{0}
-}
-
-type MessageType int32
-
-const (
-	MsgHup            MessageType = 0
-	MsgBeat           MessageType = 1
-	MsgProp           MessageType = 2
-	MsgApp            MessageType = 3
-	MsgAppResp        MessageType = 4
-	MsgVote           MessageType = 5
-	MsgVoteResp       MessageType = 6
-	MsgSnap           MessageType = 7
-	MsgHeartbeat      MessageType = 8
-	MsgHeartbeatResp  MessageType = 9
-	MsgUnreachable    MessageType = 10
-	MsgSnapStatus     MessageType = 11
-	MsgCheckQuorum    MessageType = 12
-	MsgTransferLeader MessageType = 13
-	MsgTimeoutNow     MessageType = 14
-	MsgReadIndex      MessageType = 15
-	MsgReadIndexResp  MessageType = 16
-	MsgPreVote        MessageType = 17
-	MsgPreVoteResp    MessageType = 18
-)
-
-var MessageType_name = map[int32]string{
-	0:  "MsgHup",
-	1:  "MsgBeat",
-	2:  "MsgProp",
-	3:  "MsgApp",
-	4:  "MsgAppResp",
-	5:  "MsgVote",
-	6:  "MsgVoteResp",
-	7:  "MsgSnap",
-	8:  "MsgHeartbeat",
-	9:  "MsgHeartbeatResp",
-	10: "MsgUnreachable",
-	11: "MsgSnapStatus",
-	12: "MsgCheckQuorum",
-	13: "MsgTransferLeader",
-	14: "MsgTimeoutNow",
-	15: "MsgReadIndex",
-	16: "MsgReadIndexResp",
-	17: "MsgPreVote",
-	18: "MsgPreVoteResp",
-}
-
-var MessageType_value = map[string]int32{
-	"MsgHup":            0,
-	"MsgBeat":           1,
-	"MsgProp":           2,
-	"MsgApp":            3,
-	"MsgAppResp":        4,
-	"MsgVote":           5,
-	"MsgVoteResp":       6,
-	"MsgSnap":           7,
-	"MsgHeartbeat":      8,
-	"MsgHeartbeatResp":  9,
-	"MsgUnreachable":    10,
-	"MsgSnapStatus":     11,
-	"MsgCheckQuorum":    12,
-	"MsgTransferLeader": 13,
-	"MsgTimeoutNow":     14,
-	"MsgReadIndex":      15,
-	"MsgReadIndexResp":  16,
-	"MsgPreVote":        17,
-	"MsgPreVoteResp":    18,
-}
-
-func (x MessageType) Enum() *MessageType {
-	p := new(MessageType)
-	*p = x
-	return p
-}
-
-func (x MessageType) String() string {
-	return proto.EnumName(MessageType_name, int32(x))
-}
-
-func (x *MessageType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
-	if err != nil {
-		return err
-	}
-	*x = MessageType(value)
-	return nil
-}
-
-func (MessageType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{1}
-}
-
-type ConfChangeType int32
-
-const (
-	ConfChangeAddNode        ConfChangeType = 0
-	ConfChangeRemoveNode     ConfChangeType = 1
-	ConfChangeUpdateNode     ConfChangeType = 2
-	ConfChangeAddLearnerNode ConfChangeType = 3
-)
-
-var ConfChangeType_name = map[int32]string{
-	0: "ConfChangeAddNode",
-	1: "ConfChangeRemoveNode",
-	2: "ConfChangeUpdateNode",
-	3: "ConfChangeAddLearnerNode",
-}
-
-var ConfChangeType_value = map[string]int32{
-	"ConfChangeAddNode":        0,
-	"ConfChangeRemoveNode":     1,
-	"ConfChangeUpdateNode":     2,
-	"ConfChangeAddLearnerNode": 3,
-}
-
-func (x ConfChangeType) Enum() *ConfChangeType {
-	p := new(ConfChangeType)
-	*p = x
-	return p
-}
-
-func (x ConfChangeType) String() string {
-	return proto.EnumName(ConfChangeType_name, int32(x))
-}
-
-func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
-	if err != nil {
-		return err
-	}
-	*x = ConfChangeType(value)
-	return nil
-}
-
-func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{2}
-}
-
-type Entry struct {
-	Term                 uint64    `protobuf:"varint,2,opt,name=Term" json:"Term"`
-	Index                uint64    `protobuf:"varint,3,opt,name=Index" json:"Index"`
-	Type                 EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
-	Data                 []byte    `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *Entry) Reset()         { *m = Entry{} }
-func (m *Entry) String() string { return proto.CompactTextString(m) }
-func (*Entry) ProtoMessage()    {}
-func (*Entry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{0}
-}
-func (m *Entry) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Entry) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Entry.Merge(m, src)
-}
-func (m *Entry) XXX_Size() int {
-	return m.Size()
-}
-func (m *Entry) XXX_DiscardUnknown() {
-	xxx_messageInfo_Entry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Entry proto.InternalMessageInfo
-
-type SnapshotMetadata struct {
-	ConfState            ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
-	Index                uint64    `protobuf:"varint,2,opt,name=index" json:"index"`
-	Term                 uint64    `protobuf:"varint,3,opt,name=term" json:"term"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *SnapshotMetadata) Reset()         { *m = SnapshotMetadata{} }
-func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
-func (*SnapshotMetadata) ProtoMessage()    {}
-func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{1}
-}
-func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SnapshotMetadata.Merge(m, src)
-}
-func (m *SnapshotMetadata) XXX_Size() int {
-	return m.Size()
-}
-func (m *SnapshotMetadata) XXX_DiscardUnknown() {
-	xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
-
-type Snapshot struct {
-	Data                 []byte           `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
-	Metadata             SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
-	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
-	XXX_unrecognized     []byte           `json:"-"`
-	XXX_sizecache        int32            `json:"-"`
-}
-
-func (m *Snapshot) Reset()         { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage()    {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{2}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Snapshot) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Snapshot.Merge(m, src)
-}
-func (m *Snapshot) XXX_Size() int {
-	return m.Size()
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
-	xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-type Message struct {
-	Type                 MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
-	To                   uint64      `protobuf:"varint,2,opt,name=to" json:"to"`
-	From                 uint64      `protobuf:"varint,3,opt,name=from" json:"from"`
-	Term                 uint64      `protobuf:"varint,4,opt,name=term" json:"term"`
-	LogTerm              uint64      `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
-	Index                uint64      `protobuf:"varint,6,opt,name=index" json:"index"`
-	Entries              []Entry     `protobuf:"bytes,7,rep,name=entries" json:"entries"`
-	Commit               uint64      `protobuf:"varint,8,opt,name=commit" json:"commit"`
-	Snapshot             Snapshot    `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
-	Reject               bool        `protobuf:"varint,10,opt,name=reject" json:"reject"`
-	RejectHint           uint64      `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
-	Context              []byte      `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
-	XXX_unrecognized     []byte      `json:"-"`
-	XXX_sizecache        int32       `json:"-"`
-}
-
-func (m *Message) Reset()         { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage()    {}
-func (*Message) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{3}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Message.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Message) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Message.Merge(m, src)
-}
-func (m *Message) XXX_Size() int {
-	return m.Size()
-}
-func (m *Message) XXX_DiscardUnknown() {
-	xxx_messageInfo_Message.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-type HardState struct {
-	Term                 uint64   `protobuf:"varint,1,opt,name=term" json:"term"`
-	Vote                 uint64   `protobuf:"varint,2,opt,name=vote" json:"vote"`
-	Commit               uint64   `protobuf:"varint,3,opt,name=commit" json:"commit"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HardState) Reset()         { *m = HardState{} }
-func (m *HardState) String() string { return proto.CompactTextString(m) }
-func (*HardState) ProtoMessage()    {}
-func (*HardState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{4}
-}
-func (m *HardState) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HardState) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HardState.Merge(m, src)
-}
-func (m *HardState) XXX_Size() int {
-	return m.Size()
-}
-func (m *HardState) XXX_DiscardUnknown() {
-	xxx_messageInfo_HardState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HardState proto.InternalMessageInfo
-
-type ConfState struct {
-	Nodes                []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
-	Learners             []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *ConfState) Reset()         { *m = ConfState{} }
-func (m *ConfState) String() string { return proto.CompactTextString(m) }
-func (*ConfState) ProtoMessage()    {}
-func (*ConfState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{5}
-}
-func (m *ConfState) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ConfState) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ConfState.Merge(m, src)
-}
-func (m *ConfState) XXX_Size() int {
-	return m.Size()
-}
-func (m *ConfState) XXX_DiscardUnknown() {
-	xxx_messageInfo_ConfState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfState proto.InternalMessageInfo
-
-type ConfChange struct {
-	ID                   uint64         `protobuf:"varint,1,opt,name=ID" json:"ID"`
-	Type                 ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
-	NodeID               uint64         `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
-	Context              []byte         `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
-	XXX_unrecognized     []byte         `json:"-"`
-	XXX_sizecache        int32          `json:"-"`
-}
-
-func (m *ConfChange) Reset()         { *m = ConfChange{} }
-func (m *ConfChange) String() string { return proto.CompactTextString(m) }
-func (*ConfChange) ProtoMessage()    {}
-func (*ConfChange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{6}
-}
-func (m *ConfChange) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ConfChange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ConfChange.Merge(m, src)
-}
-func (m *ConfChange) XXX_Size() int {
-	return m.Size()
-}
-func (m *ConfChange) XXX_DiscardUnknown() {
-	xxx_messageInfo_ConfChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfChange proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
-	proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
-	proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
-	proto.RegisterType((*Entry)(nil), "raftpb.Entry")
-	proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
-	proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
-	proto.RegisterType((*Message)(nil), "raftpb.Message")
-	proto.RegisterType((*HardState)(nil), "raftpb.HardState")
-	proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
-	proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
-}
-
-func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
-
-var fileDescriptor_b042552c306ae59b = []byte{
-	// 816 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45,
-	0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38,
-	0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b,
-	0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20,
-	0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3,
-	0x63, 0xcf, 0x24, 0xb7, 0xae, 0xaf, 0x6a, 0xbe, 0xfa, 0xbe, 0xea, 0xea, 0x01, 0x50, 0x62, 0xa9,
-	0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f,
-	0x9b, 0x53, 0x91, 0x9d, 0xfc, 0x06, 0xad, 0x6f, 0x53, 0xad, 0xee, 0x90, 0x43, 0x70, 0x45, 0x2a,
-	0xe1, 0xfe, 0xd8, 0x9b, 0x06, 0x27, 0xc1, 0xfd, 0x3f, 0x9f, 0x34, 0xe6, 0x16, 0xc1, 0x7d, 0x68,
-	0x5d, 0xa4, 0x11, 0xbd, 0xe3, 0xcd, 0x4a, 0xaa, 0x80, 0xf0, 0x33, 0x08, 0xae, 0xee, 0x32, 0xe2,
-	0xde, 0xd8, 0x9b, 0x0e, 0x8e, 0xf7, 0x8e, 0x8a, 0x5e, 0x47, 0x96, 0xd2, 0x24, 0x36, 0x44, 0x77,
-	0x19, 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd,
-	0x03, 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0,
-	0x42, 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xc1, 0x1d, 0x6e, 0xb9, 0x4f, 0x65, 0xba, 0x7c, 0x6d,
-	0x12, 0x8e, 0xbb, 0xb7, 0x28, 0x01, 0xa3, 0x74, 0x65, 0x95, 0x56, 0x4d, 0x14, 0x90, 0xf1, 0xa7,
-	0x8d, 0xbf, 0xaa, 0x09, 0x8b, 0x4c, 0x7e, 0x80, 0x6e, 0xa9, 0xc0, 0x48, 0x34, 0x0a, 0x6c, 0xcf,
-	0xfe, 0xdc, 0x9e, 0xf1, 0x6b, 0xe8, 0x26, 0x4e, 0x99, 0x25, 0x0e, 0x8f, 0x79, 0xa9, 0xe5, 0xb1,
-	0x72, 0xc7, 0xbb, 0xa9, 0x9f, 0xfc, 0xd5, 0x84, 0xce, 0x8c, 0xf2, 0x5c, 0xc4, 0x84, 0xcf, 0x21,
-	0xd0, 0xdb, 0x59, 0x3d, 0x2b, 0x39, 0x5c, 0xba, 0x3a, 0x2d, 0x53, 0x86, 0x43, 0xf0, 0xb5, 0xac,
-	0x39, 0xf1, 0xb5, 0x34, 0x36, 0x96, 0x4a, 0x3e, 0xb2, 0x61, 0x90, 0x8d, 0xc1, 0xe0, 0xb1, 0x41,
-	0x1c, 0x41, 0xe7, 0x56, 0xc6, 0xf6, 0x76, 0x5b, 0x95, 0x64, 0x09, 0x6e, 0xc7, 0xd6, 0x7e, 0x3a,
-	0xb6, 0xe7, 0xd0, 0xa1, 0x54, 0xab, 0x15, 0xe5, 0xbc, 0x33, 0x6e, 0x4e, 0xc3, 0xe3, 0x9d, 0xda,
-	0x1d, 0x97, 0x54, 0xae, 0x06, 0x0f, 0xa0, 0xbd, 0x90, 0x49, 0xb2, 0xd2, 0xbc, 0x5b, 0xe1, 0x72,
-	0x18, 0x1e, 0x43, 0x37, 0x77, 0x13, 0xe3, 0x3d, 0x3b, 0x49, 0xf6, 0x78, 0x92, 0xe5, 0x04, 0xcb,
-	0x3a, 0xc3, 0xa8, 0xe8, 0x67, 0x5a, 0x68, 0x0e, 0x63, 0x6f, 0xda, 0x2d, 0x19, 0x0b, 0x0c, 0x3f,
-	0x05, 0x28, 0x4e, 0xe7, 0xab, 0x54, 0xf3, 0xb0, 0xd2, 0xb3, 0x82, 0x23, 0x87, 0xce, 0x42, 0xa6,
-	0x9a, 0xde, 0x69, 0xde, 0xb7, 0x17, 0x5b, 0x86, 0x93, 0x1f, 0xa1, 0x77, 0x2e, 0x54, 0x54, 0xac,
-	0x4f, 0x39, 0x41, 0xef, 0xc9, 0x04, 0x39, 0x04, 0x6f, 0xa5, 0xa6, 0xfa, 0xe3, 0x30, 0x48, 0xc5,
-	0x70, 0xf3, 0xa9, 0xe1, 0xc9, 0x37, 0xd0, 0xdb, 0xac, 0x2b, 0x0e, 0xa1, 0x95, 0xca, 0x88, 0x72,
-	0xee, 0x8d, 0x9b, 0xd3, 0x60, 0x5e, 0x04, 0xb8, 0x0f, 0xdd, 0x5b, 0x12, 0x2a, 0x25, 0x95, 0x73,
-	0xdf, 0x26, 0x36, 0xf1, 0xe4, 0x0f, 0x0f, 0xc0, 0x7c, 0x7f, 0x7a, 0x23, 0xd2, 0xd8, 0x6e, 0xc4,
-	0xc5, 0x59, 0x4d, 0x9d, 0x7f, 0x71, 0x86, 0x5f, 0xb8, 0x27, 0xe8, 0xdb, 0xb5, 0xfa, 0xb8, 0xfa,
-	0x4c, 0x8a, 0xef, 0x9e, 0xbc, 0xc3, 0x03, 0x68, 0x5f, 0xca, 0x88, 0x2e, 0xce, 0xea, 0x9a, 0x0b,
-	0xcc, 0x0c, 0xeb, 0xd4, 0x0d, 0xab, 0x78, 0xa8, 0x65, 0x78, 0xf8, 0x25, 0xf4, 0x36, 0x0f, 0x1b,
-	0x77, 0x21, 0xb4, 0xc1, 0xa5, 0x54, 0x89, 0xb8, 0x65, 0x0d, 0x7c, 0x06, 0xbb, 0x16, 0xd8, 0x36,
-	0x66, 0xde, 0xe1, 0xdf, 0x3e, 0x84, 0x95, 0x05, 0x47, 0x80, 0xf6, 0x2c, 0x8f, 0xcf, 0xd7, 0x19,
-	0x6b, 0x60, 0x08, 0x9d, 0x59, 0x1e, 0x9f, 0x90, 0xd0, 0xcc, 0x73, 0xc1, 0x2b, 0x25, 0x33, 0xe6,
-	0xbb, 0xaa, 0x17, 0x59, 0xc6, 0x9a, 0x38, 0x00, 0x28, 0xce, 0x73, 0xca, 0x33, 0x16, 0xb8, 0xc2,
-	0xef, 0xa5, 0x26, 0xd6, 0x32, 0x22, 0x5c, 0x60, 0xb3, 0x6d, 0x97, 0x35, 0xcb, 0xc4, 0x3a, 0xc8,
-	0xa0, 0x6f, 0x9a, 0x91, 0x50, 0xfa, 0xda, 0x74, 0xe9, 0xe2, 0x10, 0x58, 0x15, 0xb1, 0x1f, 0xf5,
-	0x10, 0x61, 0x30, 0xcb, 0xe3, 0x37, 0xa9, 0x22, 0xb1, 0xb8, 0x11, 0xd7, 0xb7, 0xc4, 0x00, 0xf7,
-	0x60, 0xc7, 0x11, 0x99, 0xcb, 0x5b, 0xe7, 0x2c, 0x74, 0x65, 0xa7, 0x37, 0xb4, 0xf8, 0xe5, 0xbb,
-	0xb5, 0x54, 0xeb, 0x84, 0xf5, 0xf1, 0x23, 0xd8, 0x9b, 0xe5, 0xf1, 0x95, 0x12, 0x69, 0xbe, 0x24,
-	0xf5, 0x92, 0x44, 0x44, 0x8a, 0xed, 0xb8, 0xaf, 0xaf, 0x56, 0x09, 0xc9, 0xb5, 0xbe, 0x94, 0xbf,
-	0xb2, 0x81, 0x13, 0x33, 0x27, 0x11, 0xd9, 0x3f, 0x27, 0xdb, 0x75, 0x62, 0x36, 0x88, 0x15, 0xc3,
-	0x9c, 0xdf, 0x57, 0x8a, 0xac, 0xc5, 0x3d, 0xd7, 0xd5, 0xc5, 0xb6, 0x06, 0x0f, 0xef, 0x60, 0x50,
-	0xbf, 0x5e, 0xa3, 0x63, 0x8b, 0xbc, 0x88, 0x22, 0x73, 0x97, 0xac, 0x81, 0x1c, 0x86, 0x5b, 0x78,
-	0x4e, 0x89, 0x7c, 0x4b, 0x36, 0xe3, 0xd5, 0x33, 0x6f, 0xb2, 0x48, 0xe8, 0x22, 0xe3, 0xe3, 0x01,
-	0xf0, 0x1a, 0xd5, 0xcb, 0x62, 0x1b, 0x6d, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f,
-	0x8c, 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf,
-	0xa8, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x30, 0xe1, 0x02, 0x69, 0x74, 0x06, 0x00, 0x00,
-}
-
-func (m *Entry) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Data != nil {
-		i -= len(m.Data)
-		copy(dAtA[i:], m.Data)
-		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
-		i--
-		dAtA[i] = 0x22
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
-	i--
-	dAtA[i] = 0x10
-	{
-		size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintRaft(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0xa
-	return len(dAtA) - i, nil
-}
-
-func (m *Snapshot) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	{
-		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintRaft(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0x12
-	if m.Data != nil {
-		i -= len(m.Data)
-		copy(dAtA[i:], m.Data)
-		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Message) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Message) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Context != nil {
-		i -= len(m.Context)
-		copy(dAtA[i:], m.Context)
-		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
-		i--
-		dAtA[i] = 0x62
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
-	i--
-	dAtA[i] = 0x58
-	i--
-	if m.Reject {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x50
-	{
-		size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintRaft(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0x4a
-	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
-	i--
-	dAtA[i] = 0x40
-	if len(m.Entries) > 0 {
-		for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRaft(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x3a
-		}
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
-	i--
-	dAtA[i] = 0x30
-	i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
-	i--
-	dAtA[i] = 0x28
-	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
-	i--
-	dAtA[i] = 0x20
-	i = encodeVarintRaft(dAtA, i, uint64(m.From))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.To))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func (m *HardState) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func (m *ConfState) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Learners) > 0 {
-		for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- {
-			i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx]))
-			i--
-			dAtA[i] = 0x10
-		}
-	}
-	if len(m.Nodes) > 0 {
-		for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- {
-			i = encodeVarintRaft(dAtA, i, uint64(m.Nodes[iNdEx]))
-			i--
-			dAtA[i] = 0x8
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *ConfChange) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Context != nil {
-		i -= len(m.Context)
-		copy(dAtA[i:], m.Context)
-		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
-		i--
-		dAtA[i] = 0x22
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintRaft(dAtA, i, uint64(m.ID))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
-	offset -= sovRaft(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *Entry) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRaft(uint64(m.Type))
-	n += 1 + sovRaft(uint64(m.Term))
-	n += 1 + sovRaft(uint64(m.Index))
-	if m.Data != nil {
-		l = len(m.Data)
-		n += 1 + l + sovRaft(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *SnapshotMetadata) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.ConfState.Size()
-	n += 1 + l + sovRaft(uint64(l))
-	n += 1 + sovRaft(uint64(m.Index))
-	n += 1 + sovRaft(uint64(m.Term))
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Snapshot) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Data != nil {
-		l = len(m.Data)
-		n += 1 + l + sovRaft(uint64(l))
-	}
-	l = m.Metadata.Size()
-	n += 1 + l + sovRaft(uint64(l))
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Message) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRaft(uint64(m.Type))
-	n += 1 + sovRaft(uint64(m.To))
-	n += 1 + sovRaft(uint64(m.From))
-	n += 1 + sovRaft(uint64(m.Term))
-	n += 1 + sovRaft(uint64(m.LogTerm))
-	n += 1 + sovRaft(uint64(m.Index))
-	if len(m.Entries) > 0 {
-		for _, e := range m.Entries {
-			l = e.Size()
-			n += 1 + l + sovRaft(uint64(l))
-		}
-	}
-	n += 1 + sovRaft(uint64(m.Commit))
-	l = m.Snapshot.Size()
-	n += 1 + l + sovRaft(uint64(l))
-	n += 2
-	n += 1 + sovRaft(uint64(m.RejectHint))
-	if m.Context != nil {
-		l = len(m.Context)
-		n += 1 + l + sovRaft(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HardState) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRaft(uint64(m.Term))
-	n += 1 + sovRaft(uint64(m.Vote))
-	n += 1 + sovRaft(uint64(m.Commit))
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *ConfState) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Nodes) > 0 {
-		for _, e := range m.Nodes {
-			n += 1 + sovRaft(uint64(e))
-		}
-	}
-	if len(m.Learners) > 0 {
-		for _, e := range m.Learners {
-			n += 1 + sovRaft(uint64(e))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *ConfChange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRaft(uint64(m.ID))
-	n += 1 + sovRaft(uint64(m.Type))
-	n += 1 + sovRaft(uint64(m.NodeID))
-	if m.Context != nil {
-		l = len(m.Context)
-		n += 1 + l + sovRaft(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovRaft(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRaft(x uint64) (n int) {
-	return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Entry) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Entry: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= EntryType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
-			}
-			m.Term = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Term |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
-			}
-			m.Index = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Index |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
-			if m.Data == nil {
-				m.Data = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
-			}
-			m.Index = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Index |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
-			}
-			m.Term = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Term |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Snapshot) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
-			if m.Data == nil {
-				m.Data = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Message) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Message: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= MessageType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
-			}
-			m.To = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.To |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
-			}
-			m.From = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.From |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
-			}
-			m.Term = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Term |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
-			}
-			m.LogTerm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.LogTerm |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
-			}
-			m.Index = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Index |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Entries = append(m.Entries, Entry{})
-			if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
-			}
-			m.Commit = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Commit |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 9:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 10:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Reject = bool(v != 0)
-		case 11:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
-			}
-			m.RejectHint = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RejectHint |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 12:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
-			if m.Context == nil {
-				m.Context = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HardState) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HardState: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
-			}
-			m.Term = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Term |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
-			}
-			m.Vote = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Vote |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
-			}
-			m.Commit = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Commit |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ConfState) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType == 0 {
-				var v uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint64(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.Nodes = append(m.Nodes, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthRaft
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthRaft
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.Nodes) == 0 {
-					m.Nodes = make([]uint64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowRaft
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint64(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.Nodes = append(m.Nodes, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
-			}
-		case 2:
-			if wireType == 0 {
-				var v uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint64(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.Learners = append(m.Learners, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthRaft
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthRaft
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.Learners) == 0 {
-					m.Learners = make([]uint64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowRaft
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint64(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.Learners = append(m.Learners, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ConfChange) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= ConfChangeType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
-			}
-			m.NodeID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.NodeID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
-			if m.Context == nil {
-				m.Context = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipRaft(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthRaft
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthRaft
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipRaft(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthRaft
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowRaft   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
deleted file mode 100644
index 644ce7b..0000000
--- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
+++ /dev/null
@@ -1,95 +0,0 @@
-syntax = "proto2";
-package raftpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-enum EntryType {
-	EntryNormal     = 0;
-	EntryConfChange = 1;
-}
-
-message Entry {
-	optional uint64     Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
-	optional uint64     Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
-	optional EntryType  Type  = 1 [(gogoproto.nullable) = false];
-	optional bytes      Data  = 4;
-}
-
-message SnapshotMetadata {
-	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
-	optional uint64    index      = 2 [(gogoproto.nullable) = false];
-	optional uint64    term       = 3 [(gogoproto.nullable) = false];
-}
-
-message Snapshot {
-	optional bytes            data     = 1;
-	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
-}
-
-enum MessageType {
-	MsgHup             = 0;
-	MsgBeat            = 1;
-	MsgProp            = 2;
-	MsgApp             = 3;
-	MsgAppResp         = 4;
-	MsgVote            = 5;
-	MsgVoteResp        = 6;
-	MsgSnap            = 7;
-	MsgHeartbeat       = 8;
-	MsgHeartbeatResp   = 9;
-	MsgUnreachable     = 10;
-	MsgSnapStatus      = 11;
-	MsgCheckQuorum     = 12;
-	MsgTransferLeader  = 13;
-	MsgTimeoutNow      = 14;
-	MsgReadIndex       = 15;
-	MsgReadIndexResp   = 16;
-	MsgPreVote         = 17;
-	MsgPreVoteResp     = 18;
-}
-
-message Message {
-	optional MessageType type        = 1  [(gogoproto.nullable) = false];
-	optional uint64      to          = 2  [(gogoproto.nullable) = false];
-	optional uint64      from        = 3  [(gogoproto.nullable) = false];
-	optional uint64      term        = 4  [(gogoproto.nullable) = false];
-	optional uint64      logTerm     = 5  [(gogoproto.nullable) = false];
-	optional uint64      index       = 6  [(gogoproto.nullable) = false];
-	repeated Entry       entries     = 7  [(gogoproto.nullable) = false];
-	optional uint64      commit      = 8  [(gogoproto.nullable) = false];
-	optional Snapshot    snapshot    = 9  [(gogoproto.nullable) = false];
-	optional bool        reject      = 10 [(gogoproto.nullable) = false];
-	optional uint64      rejectHint  = 11 [(gogoproto.nullable) = false];
-	optional bytes       context     = 12;
-}
-
-message HardState {
-	optional uint64 term   = 1 [(gogoproto.nullable) = false];
-	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
-	optional uint64 commit = 3 [(gogoproto.nullable) = false];
-}
-
-message ConfState {
-	repeated uint64 nodes    = 1;
-	repeated uint64 learners = 2;
-}
-
-enum ConfChangeType {
-	ConfChangeAddNode        = 0;
-	ConfChangeRemoveNode     = 1;
-	ConfChangeUpdateNode     = 2;
-	ConfChangeAddLearnerNode = 3;
-}
-
-message ConfChange {
-	optional uint64          ID      = 1 [(gogoproto.nullable) = false];
-	optional ConfChangeType  Type    = 2 [(gogoproto.nullable) = false];
-	optional uint64          NodeID  = 3 [(gogoproto.nullable) = false];
-	optional bytes           Context = 4;
-}
diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go
deleted file mode 100644
index 925cb85..0000000
--- a/vendor/github.com/coreos/etcd/raft/rawnode.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"errors"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// ErrStepLocalMsg is returned when trying to step a local raft message
-var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
-
-// ErrStepPeerNotFound is returned when trying to step a response message
-// but there is no peer found in raft.prs for that node.
-var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
-
-// RawNode is a thread-unsafe Node.
-// The methods of this struct correspond to the methods of Node and are described
-// more fully there.
-type RawNode struct {
-	raft       *raft
-	prevSoftSt *SoftState
-	prevHardSt pb.HardState
-}
-
-func (rn *RawNode) newReady() Ready {
-	return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
-}
-
-func (rn *RawNode) commitReady(rd Ready) {
-	if rd.SoftState != nil {
-		rn.prevSoftSt = rd.SoftState
-	}
-	if !IsEmptyHardState(rd.HardState) {
-		rn.prevHardSt = rd.HardState
-	}
-	if rn.prevHardSt.Commit != 0 {
-		// In most cases, prevHardSt and rd.HardState will be the same
-		// because when there are new entries to apply we just sent a
-		// HardState with an updated Commit value. However, on initial
-		// startup the two are different because we don't send a HardState
-		// until something changes, but we do send any un-applied but
-		// committed entries (and previously-committed entries may be
-		// incorporated into the snapshot, even if rd.CommittedEntries is
-		// empty). Therefore we mark all committed entries as applied
-		// whether they were included in rd.HardState or not.
-		rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
-	}
-	if len(rd.Entries) > 0 {
-		e := rd.Entries[len(rd.Entries)-1]
-		rn.raft.raftLog.stableTo(e.Index, e.Term)
-	}
-	if !IsEmptySnap(rd.Snapshot) {
-		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
-	}
-	if len(rd.ReadStates) != 0 {
-		rn.raft.readStates = nil
-	}
-}
-
-// NewRawNode returns a new RawNode given configuration and a list of raft peers.
-func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
-	if config.ID == 0 {
-		panic("config.ID must not be zero")
-	}
-	r := newRaft(config)
-	rn := &RawNode{
-		raft: r,
-	}
-	lastIndex, err := config.Storage.LastIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
-	// restoring an existing RawNode (like RestartNode).
-	// TODO(bdarnell): rethink RawNode initialization and whether the application needs
-	// to be able to tell us when it expects the RawNode to exist.
-	if lastIndex == 0 {
-		r.becomeFollower(1, None)
-		ents := make([]pb.Entry, len(peers))
-		for i, peer := range peers {
-			cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
-			data, err := cc.Marshal()
-			if err != nil {
-				panic("unexpected marshal error")
-			}
-
-			ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
-		}
-		r.raftLog.append(ents...)
-		r.raftLog.committed = uint64(len(ents))
-		for _, peer := range peers {
-			r.addNode(peer.ID)
-		}
-	}
-
-	// Set the initial hard and soft states after performing all initialization.
-	rn.prevSoftSt = r.softState()
-	if lastIndex == 0 {
-		rn.prevHardSt = emptyState
-	} else {
-		rn.prevHardSt = r.hardState()
-	}
-
-	return rn, nil
-}
-
-// Tick advances the internal logical clock by a single tick.
-func (rn *RawNode) Tick() {
-	rn.raft.tick()
-}
-
-// TickQuiesced advances the internal logical clock by a single tick without
-// performing any other state machine processing. It allows the caller to avoid
-// periodic heartbeats and elections when all of the peers in a Raft group are
-// known to be at the same state. Expected usage is to periodically invoke Tick
-// or TickQuiesced depending on whether the group is "active" or "quiesced".
-//
-// WARNING: Be very careful about using this method as it subverts the Raft
-// state machine. You should probably be using Tick instead.
-func (rn *RawNode) TickQuiesced() {
-	rn.raft.electionElapsed++
-}
-
-// Campaign causes this RawNode to transition to candidate state.
-func (rn *RawNode) Campaign() error {
-	return rn.raft.Step(pb.Message{
-		Type: pb.MsgHup,
-	})
-}
-
-// Propose proposes data be appended to the raft log.
-func (rn *RawNode) Propose(data []byte) error {
-	return rn.raft.Step(pb.Message{
-		Type: pb.MsgProp,
-		From: rn.raft.id,
-		Entries: []pb.Entry{
-			{Data: data},
-		}})
-}
-
-// ProposeConfChange proposes a config change.
-func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
-	data, err := cc.Marshal()
-	if err != nil {
-		return err
-	}
-	return rn.raft.Step(pb.Message{
-		Type: pb.MsgProp,
-		Entries: []pb.Entry{
-			{Type: pb.EntryConfChange, Data: data},
-		},
-	})
-}
-
-// ApplyConfChange applies a config change to the local node.
-func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
-	if cc.NodeID == None {
-		rn.raft.resetPendingConf()
-		return &pb.ConfState{Nodes: rn.raft.nodes()}
-	}
-	switch cc.Type {
-	case pb.ConfChangeAddNode:
-		rn.raft.addNode(cc.NodeID)
-	case pb.ConfChangeAddLearnerNode:
-		rn.raft.addLearner(cc.NodeID)
-	case pb.ConfChangeRemoveNode:
-		rn.raft.removeNode(cc.NodeID)
-	case pb.ConfChangeUpdateNode:
-		rn.raft.resetPendingConf()
-	default:
-		panic("unexpected conf type")
-	}
-	return &pb.ConfState{Nodes: rn.raft.nodes()}
-}
-
-// Step advances the state machine using the given message.
-func (rn *RawNode) Step(m pb.Message) error {
-	// ignore unexpected local messages received over the network
-	if IsLocalMsg(m.Type) {
-		return ErrStepLocalMsg
-	}
-	if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
-		return rn.raft.Step(m)
-	}
-	return ErrStepPeerNotFound
-}
-
-// Ready returns the current point-in-time state of this RawNode.
-func (rn *RawNode) Ready() Ready {
-	rd := rn.newReady()
-	rn.raft.msgs = nil
-	return rd
-}
-
-// HasReady is called when the RawNode user needs to check whether any Ready is pending.
-// Checking logic in this method should be consistent with Ready.containsUpdates().
-func (rn *RawNode) HasReady() bool {
-	r := rn.raft
-	if !r.softState().equal(rn.prevSoftSt) {
-		return true
-	}
-	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
-		return true
-	}
-	if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
-		return true
-	}
-	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
-		return true
-	}
-	if len(r.readStates) != 0 {
-		return true
-	}
-	return false
-}
-
-// Advance notifies the RawNode that the application has applied and saved progress in the
-// last Ready results.
-func (rn *RawNode) Advance(rd Ready) {
-	rn.commitReady(rd)
-}
-
-// Status returns the current status of the given group.
-func (rn *RawNode) Status() *Status {
-	status := getStatus(rn.raft)
-	return &status
-}
-
-// ReportUnreachable reports the given node is not reachable for the last send.
-func (rn *RawNode) ReportUnreachable(id uint64) {
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
-}
-
-// ReportSnapshot reports the status of the sent snapshot.
-func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
-	rej := status == SnapshotFailure
-
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
-}
-
-// TransferLeader tries to transfer leadership to the given transferee.
-func (rn *RawNode) TransferLeader(transferee uint64) {
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
-}
-
-// ReadIndex requests a read state. The read state will be set in ready.
-// Read State has a read index. Once the application advances further than the read
-// index, any linearizable read requests issued before the read request can be
-// processed safely. The read state will have the same rctx attached.
-func (rn *RawNode) ReadIndex(rctx []byte) {
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
-}
diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go
deleted file mode 100644
index ae746fa..0000000
--- a/vendor/github.com/coreos/etcd/raft/read_only.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import pb "github.com/coreos/etcd/raft/raftpb"
-
-// ReadState provides state for a read-only query.
-// It is the caller's responsibility to call ReadIndex before getting this
-// state from Ready; it is also the caller's duty to check whether this state
-// matches its request via RequestCtx, e.g. by supplying a unique id as
-// RequestCtx.
-type ReadState struct {
-	Index      uint64
-	RequestCtx []byte
-}
-
-type readIndexStatus struct {
-	req   pb.Message
-	index uint64
-	acks  map[uint64]struct{}
-}
-
-type readOnly struct {
-	option           ReadOnlyOption
-	pendingReadIndex map[string]*readIndexStatus
-	readIndexQueue   []string
-}
-
-func newReadOnly(option ReadOnlyOption) *readOnly {
-	return &readOnly{
-		option:           option,
-		pendingReadIndex: make(map[string]*readIndexStatus),
-	}
-}
-
-// addRequest adds a read-only request into the readonly struct.
-// `index` is the commit index of the raft state machine when it received
-// the read only request.
-// `m` is the original read only request message from the local or remote node.
-func (ro *readOnly) addRequest(index uint64, m pb.Message) {
-	ctx := string(m.Entries[0].Data)
-	if _, ok := ro.pendingReadIndex[ctx]; ok {
-		return
-	}
-	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
-	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
-}
-
-// recvAck notifies the readonly struct that the raft state machine received
-// an acknowledgment of the heartbeat that attached with the read only request
-// context.
-func (ro *readOnly) recvAck(m pb.Message) int {
-	rs, ok := ro.pendingReadIndex[string(m.Context)]
-	if !ok {
-		return 0
-	}
-
-	rs.acks[m.From] = struct{}{}
-	// add one to include an ack from local node
-	return len(rs.acks) + 1
-}
-
-// advance advances the read only request queue kept by the readonly struct.
-// It dequeues the requests until it finds the read only request that has
-// the same context as the given `m`.
-func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
-	var (
-		i     int
-		found bool
-	)
-
-	ctx := string(m.Context)
-	rss := []*readIndexStatus{}
-
-	for _, okctx := range ro.readIndexQueue {
-		i++
-		rs, ok := ro.pendingReadIndex[okctx]
-		if !ok {
-			panic("cannot find corresponding read state from pending map")
-		}
-		rss = append(rss, rs)
-		if okctx == ctx {
-			found = true
-			break
-		}
-	}
-
-	if found {
-		ro.readIndexQueue = ro.readIndexQueue[i:]
-		for _, rs := range rss {
-			delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
-		}
-		return rss
-	}
-
-	return nil
-}
-
-// lastPendingRequestCtx returns the context of the last pending read only
-// request in readonly struct.
-func (ro *readOnly) lastPendingRequestCtx() string {
-	if len(ro.readIndexQueue) == 0 {
-		return ""
-	}
-	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
-}
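
A minimal sketch, written as if it sat inside the raft package itself (the types above are unexported), of how the pending read-index queue behaves; the context string and index are illustrative.

```go
package raft

import pb "github.com/coreos/etcd/raft/raftpb"

func exampleReadOnlyQueue() int {
	ro := newReadOnly(ReadOnlySafe)

	// The leader records a pending read request at its current commit index.
	ro.addRequest(5, pb.Message{Entries: []pb.Entry{{Data: []byte("req-1")}}})

	// Each heartbeat response carrying the same context counts as an ack;
	// the local node is added implicitly, so this returns 2.
	acks := ro.recvAck(pb.Message{From: 2, Context: []byte("req-1")})

	// advance dequeues every request up to and including the matching context.
	_ = ro.advance(pb.Message{Context: []byte("req-1")})
	return acks
}
```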
diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go
deleted file mode 100644
index f4d3d86..0000000
--- a/vendor/github.com/coreos/etcd/raft/status.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type Status struct {
-	ID uint64
-
-	pb.HardState
-	SoftState
-
-	Applied  uint64
-	Progress map[uint64]Progress
-
-	LeadTransferee uint64
-}
-
-// getStatus gets a copy of the current raft status.
-func getStatus(r *raft) Status {
-	s := Status{
-		ID:             r.id,
-		LeadTransferee: r.leadTransferee,
-	}
-
-	s.HardState = r.hardState()
-	s.SoftState = *r.softState()
-
-	s.Applied = r.raftLog.applied
-
-	if s.RaftState == StateLeader {
-		s.Progress = make(map[uint64]Progress)
-		for id, p := range r.prs {
-			s.Progress[id] = *p
-		}
-
-		for id, p := range r.learnerPrs {
-			s.Progress[id] = *p
-		}
-	}
-
-	return s
-}
-
-// MarshalJSON translates the raft status into JSON.
-// TODO: try to simplify this by introducing ID type into raft
-func (s Status) MarshalJSON() ([]byte, error) {
-	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
-		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
-
-	if len(s.Progress) == 0 {
-		j += "},"
-	} else {
-		for k, v := range s.Progress {
-			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
-			j += subj
-		}
-		// remove the trailing ","
-		j = j[:len(j)-1] + "},"
-	}
-
-	j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
-	return []byte(j), nil
-}
-
-func (s Status) String() string {
-	b, err := s.MarshalJSON()
-	if err != nil {
-		raftLogger.Panicf("unexpected error: %v", err)
-	}
-	return string(b)
-}
diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go
deleted file mode 100644
index 69c3a7d..0000000
--- a/vendor/github.com/coreos/etcd/raft/storage.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"errors"
-	"sync"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// ErrCompacted is returned by Storage.Entries/Compact when a requested
-// index is unavailable because it predates the last snapshot.
-var ErrCompacted = errors.New("requested index is unavailable due to compaction")
-
-// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
-// index is older than the existing snapshot.
-var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
-
-// ErrUnavailable is returned by Storage interface when the requested log entries
-// are unavailable.
-var ErrUnavailable = errors.New("requested entry at index is unavailable")
-
-// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
-// snapshot is temporarily unavailable.
-var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
-
-// Storage is an interface that may be implemented by the application
-// to retrieve log entries from storage.
-//
-// If any Storage method returns an error, the raft instance will
-// become inoperable and refuse to participate in elections; the
-// application is responsible for cleanup and recovery in this case.
-type Storage interface {
-	// InitialState returns the saved HardState and ConfState information.
-	InitialState() (pb.HardState, pb.ConfState, error)
-	// Entries returns a slice of log entries in the range [lo,hi).
-	// MaxSize limits the total size of the log entries returned, but
-	// maxSize limits the total size of the log entries returned, but
-	Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
-	// Term returns the term of entry i, which must be in the range
-	// [FirstIndex()-1, LastIndex()]. The term of the entry before
-	// FirstIndex is retained for matching purposes even though the
-	// rest of that entry may not be available.
-	Term(i uint64) (uint64, error)
-	// LastIndex returns the index of the last entry in the log.
-	LastIndex() (uint64, error)
-	// FirstIndex returns the index of the first log entry that is
-	// possibly available via Entries (older entries have been incorporated
-	// into the latest Snapshot; if storage only contains the dummy entry the
-	// first log entry is not available).
-	FirstIndex() (uint64, error)
-	// Snapshot returns the most recent snapshot.
-	// If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
-	// so raft state machine could know that Storage needs some time to prepare
-	// snapshot and call Snapshot later.
-	Snapshot() (pb.Snapshot, error)
-}
-
-// MemoryStorage implements the Storage interface backed by an
-// in-memory array.
-type MemoryStorage struct {
-	// Protects access to all fields. Most methods of MemoryStorage are
-	// run on the raft goroutine, but Append() is run on an application
-	// goroutine.
-	sync.Mutex
-
-	hardState pb.HardState
-	snapshot  pb.Snapshot
-	// ents[i] has raft log position i+snapshot.Metadata.Index
-	ents []pb.Entry
-}
-
-// NewMemoryStorage creates an empty MemoryStorage.
-func NewMemoryStorage() *MemoryStorage {
-	return &MemoryStorage{
-		// When starting from scratch populate the list with a dummy entry at term zero.
-		ents: make([]pb.Entry, 1),
-	}
-}
-
-// InitialState implements the Storage interface.
-func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
-	return ms.hardState, ms.snapshot.Metadata.ConfState, nil
-}
-
-// SetHardState saves the current HardState.
-func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
-	ms.Lock()
-	defer ms.Unlock()
-	ms.hardState = st
-	return nil
-}
-
-// Entries implements the Storage interface.
-func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	offset := ms.ents[0].Index
-	if lo <= offset {
-		return nil, ErrCompacted
-	}
-	if hi > ms.lastIndex()+1 {
-		raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
-	}
-	// only contains dummy entries.
-	if len(ms.ents) == 1 {
-		return nil, ErrUnavailable
-	}
-
-	ents := ms.ents[lo-offset : hi-offset]
-	return limitSize(ents, maxSize), nil
-}
-
-// Term implements the Storage interface.
-func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	offset := ms.ents[0].Index
-	if i < offset {
-		return 0, ErrCompacted
-	}
-	if int(i-offset) >= len(ms.ents) {
-		return 0, ErrUnavailable
-	}
-	return ms.ents[i-offset].Term, nil
-}
-
-// LastIndex implements the Storage interface.
-func (ms *MemoryStorage) LastIndex() (uint64, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	return ms.lastIndex(), nil
-}
-
-func (ms *MemoryStorage) lastIndex() uint64 {
-	return ms.ents[0].Index + uint64(len(ms.ents)) - 1
-}
-
-// FirstIndex implements the Storage interface.
-func (ms *MemoryStorage) FirstIndex() (uint64, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	return ms.firstIndex(), nil
-}
-
-func (ms *MemoryStorage) firstIndex() uint64 {
-	return ms.ents[0].Index + 1
-}
-
-// Snapshot implements the Storage interface.
-func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	return ms.snapshot, nil
-}
-
-// ApplySnapshot overwrites the contents of this Storage object with
-// those of the given snapshot.
-func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
-	ms.Lock()
-	defer ms.Unlock()
-
-	// check for an old snapshot being applied
-	msIndex := ms.snapshot.Metadata.Index
-	snapIndex := snap.Metadata.Index
-	if msIndex >= snapIndex {
-		return ErrSnapOutOfDate
-	}
-
-	ms.snapshot = snap
-	ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
-	return nil
-}
-
-// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
-// can be used to reconstruct the state at that point.
-// If any configuration changes have been made since the last compaction,
-// the result of the last ApplyConfChange must be passed in.
-func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	if i <= ms.snapshot.Metadata.Index {
-		return pb.Snapshot{}, ErrSnapOutOfDate
-	}
-
-	offset := ms.ents[0].Index
-	if i > ms.lastIndex() {
-		raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
-	}
-
-	ms.snapshot.Metadata.Index = i
-	ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
-	if cs != nil {
-		ms.snapshot.Metadata.ConfState = *cs
-	}
-	ms.snapshot.Data = data
-	return ms.snapshot, nil
-}
-
-// Compact discards all log entries prior to compactIndex.
-// It is the application's responsibility to not attempt to compact an index
-// greater than raftLog.applied.
-func (ms *MemoryStorage) Compact(compactIndex uint64) error {
-	ms.Lock()
-	defer ms.Unlock()
-	offset := ms.ents[0].Index
-	if compactIndex <= offset {
-		return ErrCompacted
-	}
-	if compactIndex > ms.lastIndex() {
-		raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
-	}
-
-	i := compactIndex - offset
-	ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
-	ents[0].Index = ms.ents[i].Index
-	ents[0].Term = ms.ents[i].Term
-	ents = append(ents, ms.ents[i+1:]...)
-	ms.ents = ents
-	return nil
-}
-
-// Append the new entries to storage.
-// TODO (xiangli): ensure the entries are continuous and
-// entries[0].Index > ms.entries[0].Index
-func (ms *MemoryStorage) Append(entries []pb.Entry) error {
-	if len(entries) == 0 {
-		return nil
-	}
-
-	ms.Lock()
-	defer ms.Unlock()
-
-	first := ms.firstIndex()
-	last := entries[0].Index + uint64(len(entries)) - 1
-
-	// shortcut if there is no new entry.
-	if last < first {
-		return nil
-	}
-	// truncate compacted entries
-	if first > entries[0].Index {
-		entries = entries[first-entries[0].Index:]
-	}
-
-	offset := entries[0].Index - ms.ents[0].Index
-	switch {
-	case uint64(len(ms.ents)) > offset:
-		ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
-		ms.ents = append(ms.ents, entries...)
-	case uint64(len(ms.ents)) == offset:
-		ms.ents = append(ms.ents, entries...)
-	default:
-		raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
-			ms.lastIndex(), entries[0].Index)
-	}
-	return nil
-}
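
To make the MemoryStorage semantics above concrete, a rough, hedged usage sketch; the indices and the 1 MiB size limit are illustrative.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	ms := raft.NewMemoryStorage()

	// Append three entries; the storage keeps a dummy entry at index 0.
	_ = ms.Append([]pb.Entry{
		{Index: 1, Term: 1},
		{Index: 2, Term: 1},
		{Index: 3, Term: 2},
	})

	first, _ := ms.FirstIndex()                 // 1
	last, _ := ms.LastIndex()                   // 3
	ents, _ := ms.Entries(first, last+1, 1<<20) // all three entries, size permitting
	fmt.Println(first, last, len(ents))         // 1 3 3

	// Discard everything before index 2; earlier reads now return ErrCompacted.
	_ = ms.Compact(2)
	if _, err := ms.Entries(1, 2, 1<<20); err == raft.ErrCompacted {
		fmt.Println("index 1 has been compacted")
	}
}
```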
diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go
deleted file mode 100644
index f4141fe..0000000
--- a/vendor/github.com/coreos/etcd/raft/util.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"bytes"
-	"fmt"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-func (st StateType) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf("%q", st.String())), nil
-}
-
-// uint64Slice implements sort interface
-type uint64Slice []uint64
-
-func (p uint64Slice) Len() int           { return len(p) }
-func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-func min(a, b uint64) uint64 {
-	if a > b {
-		return b
-	}
-	return a
-}
-
-func max(a, b uint64) uint64 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-func IsLocalMsg(msgt pb.MessageType) bool {
-	return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
-		msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
-}
-
-func IsResponseMsg(msgt pb.MessageType) bool {
-	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
-}
-
-// voteRespMsgType maps vote and prevote message types to their corresponding responses.
-func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
-	switch msgt {
-	case pb.MsgVote:
-		return pb.MsgVoteResp
-	case pb.MsgPreVote:
-		return pb.MsgPreVoteResp
-	default:
-		panic(fmt.Sprintf("not a vote message: %s", msgt))
-	}
-}
-
-// EntryFormatter can be implemented by the application to provide human-readable formatting
-// of entry data. Nil is a valid EntryFormatter and will use a default format.
-type EntryFormatter func([]byte) string
-
-// DescribeMessage returns a concise human-readable description of a
-// Message for debugging.
-func DescribeMessage(m pb.Message, f EntryFormatter) string {
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
-	if m.Reject {
-		fmt.Fprintf(&buf, " Rejected")
-		if m.RejectHint != 0 {
-			fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
-		}
-	}
-	if m.Commit != 0 {
-		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
-	}
-	if len(m.Entries) > 0 {
-		fmt.Fprintf(&buf, " Entries:[")
-		for i, e := range m.Entries {
-			if i != 0 {
-				buf.WriteString(", ")
-			}
-			buf.WriteString(DescribeEntry(e, f))
-		}
-		fmt.Fprintf(&buf, "]")
-	}
-	if !IsEmptySnap(m.Snapshot) {
-		fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
-	}
-	return buf.String()
-}
-
-// DescribeEntry returns a concise human-readable description of an
-// Entry for debugging.
-func DescribeEntry(e pb.Entry, f EntryFormatter) string {
-	var formatted string
-	if e.Type == pb.EntryNormal && f != nil {
-		formatted = f(e.Data)
-	} else {
-		formatted = fmt.Sprintf("%q", e.Data)
-	}
-	return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
-}
-
-func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
-	if len(ents) == 0 {
-		return ents
-	}
-	size := ents[0].Size()
-	var limit int
-	for limit = 1; limit < len(ents); limit++ {
-		size += ents[limit].Size()
-		if uint64(size) > maxSize {
-			break
-		}
-	}
-	return ents[:limit]
-}
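
A small sketch of the debugging helpers above; passing a nil EntryFormatter falls back to quoting the raw entry data.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	e := pb.Entry{Term: 2, Index: 5, Type: pb.EntryNormal, Data: []byte("set x=1")}
	fmt.Println(raft.DescribeEntry(e, nil)) // 2/5 EntryNormal "set x=1"
}
```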
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go
deleted file mode 100644
index aa96ffa..0000000
--- a/vendor/github.com/coreos/etcd/version/version.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package version implements etcd version parsing and contains the latest
-// version information.
-package version
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/coreos/go-semver/semver"
-)
-
-var (
-	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
-	MinClusterVersion = "3.0.0"
-	Version           = "3.3.25"
-	APIVersion        = "unknown"
-
-	// Git SHA Value will be set during build
-	GitSHA = "Not provided (use ./build instead of go build)"
-)
-
-func init() {
-	ver, err := semver.NewVersion(Version)
-	if err == nil {
-		APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
-	}
-}
-
-type Versions struct {
-	Server  string `json:"etcdserver"`
-	Cluster string `json:"etcdcluster"`
-	// TODO: raft state machine version
-}
-
-// Cluster returns only the major.minor part of the given version string.
-func Cluster(v string) string {
-	vs := strings.Split(v, ".")
-	if len(vs) <= 2 {
-		return v
-	}
-	return fmt.Sprintf("%s.%s", vs[0], vs[1])
-}
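
A brief sketch of the helpers in this deleted file; the version strings are illustrative.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/version"
)

func main() {
	fmt.Println(version.Cluster("3.3.25")) // 3.3
	fmt.Println(version.Cluster("3.3"))    // 3.3 (already major.minor)
	fmt.Println(version.MinClusterVersion) // 3.0.0
}
```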
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
index 76cf485..eb9fb7f 100644
--- a/vendor/github.com/coreos/go-semver/semver/semver.go
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -85,7 +85,7 @@
 		return fmt.Errorf("failed to validate metadata: %v", err)
 	}
 
-	parsed := make([]int64, 3, 3)
+	parsed := make([]int64, 3)
 
 	for i, v := range dotParts[:3] {
 		val, err := strconv.ParseInt(v, 10, 64)
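
The only change in this hunk drops the redundant capacity argument: `make([]int64, 3)` already produces a slice with length 3 and capacity 3, so the two forms are equivalent (this is the simplification that style linters flag). A tiny illustrative check:

```go
package main

import "fmt"

func main() {
	a := make([]int64, 3, 3) // old form
	b := make([]int64, 3)    // new form
	fmt.Println(len(a), cap(a)) // 3 3
	fmt.Println(len(b), cap(b)) // 3 3
}
```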
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
deleted file mode 100644
index a0f4837..0000000
--- a/vendor/github.com/coreos/go-systemd/journal/journal.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package journal provides write bindings to the local systemd journal.
-// It is implemented in pure Go and connects to the journal directly over its
-// unix socket.
-//
-// To read from the journal, see the "sdjournal" package, which wraps the
-// sd-journal C API.
-//
-// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
-package journal
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"os"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"syscall"
-	"unsafe"
-)
-
-// Priority of a journal message
-type Priority int
-
-const (
-	PriEmerg Priority = iota
-	PriAlert
-	PriCrit
-	PriErr
-	PriWarning
-	PriNotice
-	PriInfo
-	PriDebug
-)
-
-var (
-	// This can be overridden at build-time:
-	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
-	journalSocket = "/run/systemd/journal/socket"
-
-	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
-	// Concrete safe pointer type: *net.UnixConn
-	unixConnPtr unsafe.Pointer
-	// onceConn ensures that unixConnPtr is initialized exactly once.
-	onceConn sync.Once
-)
-
-func init() {
-	onceConn.Do(initConn)
-}
-
-// Enabled checks whether the local systemd journal is available for logging.
-func Enabled() bool {
-	onceConn.Do(initConn)
-
-	if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
-		return false
-	}
-
-	if _, err := net.Dial("unixgram", journalSocket); err != nil {
-		return false
-	}
-
-	return true
-}
-
-// Send a message to the local systemd journal. vars is a map of journald
-// fields to values.  Fields must be composed of uppercase letters, numbers,
-// and underscores, but must not start with an underscore. Within these
-// restrictions, any arbitrary field name may be used.  Some names have special
-// significance: see the journalctl documentation
-// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
-// for more details.  vars may be nil.
-func Send(message string, priority Priority, vars map[string]string) error {
-	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
-	if conn == nil {
-		return errors.New("could not initialize socket to journald")
-	}
-
-	socketAddr := &net.UnixAddr{
-		Name: journalSocket,
-		Net:  "unixgram",
-	}
-
-	data := new(bytes.Buffer)
-	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
-	appendVariable(data, "MESSAGE", message)
-	for k, v := range vars {
-		appendVariable(data, k, v)
-	}
-
-	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
-	if err == nil {
-		return nil
-	}
-	if !isSocketSpaceError(err) {
-		return err
-	}
-
-	// Large log entry, send it via tempfile and ancillary-fd.
-	file, err := tempFd()
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-	_, err = io.Copy(file, data)
-	if err != nil {
-		return err
-	}
-	rights := syscall.UnixRights(int(file.Fd()))
-	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Print prints a message to the local systemd journal using Send().
-func Print(priority Priority, format string, a ...interface{}) error {
-	return Send(fmt.Sprintf(format, a...), priority, nil)
-}
-
-func appendVariable(w io.Writer, name, value string) {
-	if err := validVarName(name); err != nil {
-		fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
-	}
-	if strings.ContainsRune(value, '\n') {
-		/* When the value contains a newline, we write:
-		 * - the variable name, followed by a newline
-		 * - the size (in 64bit little endian format)
-		 * - the data, followed by a newline
-		 */
-		fmt.Fprintln(w, name)
-		binary.Write(w, binary.LittleEndian, uint64(len(value)))
-		fmt.Fprintln(w, value)
-	} else {
-		/* just write the variable and value all on one line */
-		fmt.Fprintf(w, "%s=%s\n", name, value)
-	}
-}
-
-// validVarName validates a variable name to make sure journald will accept it.
-// The variable name must be in uppercase and consist only of characters,
-// numbers and underscores, and may not begin with an underscore:
-// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
-func validVarName(name string) error {
-	if name == "" {
-		return errors.New("Empty variable name")
-	} else if name[0] == '_' {
-		return errors.New("Variable name begins with an underscore")
-	}
-
-	for _, c := range name {
-		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
-			return errors.New("Variable name contains invalid characters")
-		}
-	}
-	return nil
-}
-
-// isSocketSpaceError checks whether the error is signaling
-// an "overlarge message" condition.
-func isSocketSpaceError(err error) bool {
-	opErr, ok := err.(*net.OpError)
-	if !ok || opErr == nil {
-		return false
-	}
-
-	sysErr, ok := opErr.Err.(*os.SyscallError)
-	if !ok || sysErr == nil {
-		return false
-	}
-
-	return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
-}
-
-// tempFd creates a temporary, unlinked file under `/dev/shm`.
-func tempFd() (*os.File, error) {
-	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
-	if err != nil {
-		return nil, err
-	}
-	err = syscall.Unlink(file.Name())
-	if err != nil {
-		return nil, err
-	}
-	return file, nil
-}
-
-// initConn initializes the global `unixConnPtr` socket.
-// It is meant to be called exactly once, at program startup.
-func initConn() {
-	autobind, err := net.ResolveUnixAddr("unixgram", "")
-	if err != nil {
-		return
-	}
-
-	sock, err := net.ListenUnixgram("unixgram", autobind)
-	if err != nil {
-		return
-	}
-
-	atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
-}
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE
similarity index 100%
rename from vendor/github.com/coreos/go-systemd/LICENSE
rename to vendor/github.com/coreos/go-systemd/v22/LICENSE
diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE
similarity index 100%
rename from vendor/github.com/coreos/go-systemd/NOTICE
rename to vendor/github.com/coreos/go-systemd/v22/NOTICE
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
new file mode 100644
index 0000000..ac24c77
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
@@ -0,0 +1,46 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"fmt"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+	PriEmerg Priority = iota
+	PriAlert
+	PriCrit
+	PriErr
+	PriWarning
+	PriNotice
+	PriInfo
+	PriDebug
+)
+
+// Print prints a message to the local systemd journal using Send().
+func Print(priority Priority, format string, a ...interface{}) error {
+	return Send(fmt.Sprintf(format, a...), priority, nil)
+}
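
A minimal usage sketch for the package as added here; the log message is illustrative.

```go
package main

import (
	"os"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	if journal.Enabled() {
		_ = journal.Print(journal.PriInfo, "service started (pid %d)", os.Getpid())
	}
}
```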
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
new file mode 100644
index 0000000..c5b23a8
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
@@ -0,0 +1,267 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	// This can be overridden at build-time:
+	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
+	journalSocket = "/run/systemd/journal/socket"
+
+	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
+	// Concrete safe pointer type: *net.UnixConn
+	unixConnPtr unsafe.Pointer
+	// onceConn ensures that unixConnPtr is initialized exactly once.
+	onceConn sync.Once
+)
+
+// Enabled checks whether the local systemd journal is available for logging.
+func Enabled() bool {
+	if c := getOrInitConn(); c == nil {
+		return false
+	}
+
+	conn, err := net.Dial("unixgram", journalSocket)
+	if err != nil {
+		return false
+	}
+	defer conn.Close()
+
+	return true
+}
+
+// StderrIsJournalStream returns whether the process stderr is connected
+// to the Journal's stream transport.
+//
+// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
+//
+// Returns true if JOURNAL_STREAM environment variable is present,
+// and stderr's device and inode numbers match it.
+//
+// An error is returned if an unexpected error occurs: e.g. the JOURNAL_STREAM
+// environment variable is present but malformed, the fstat syscall fails, etc.
+//
+// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
+func StderrIsJournalStream() (bool, error) {
+	return fdIsJournalStream(syscall.Stderr)
+}
+
+// StdoutIsJournalStream returns whether the process stdout is connected
+// to the Journal's stream transport.
+//
+// Returns true if JOURNAL_STREAM environment variable is present,
+// and stdout's device and inode numbers match it.
+//
+// An error is returned if an unexpected error occurs: e.g. the JOURNAL_STREAM
+// environment variable is present but malformed, the fstat syscall fails, etc.
+//
+// Most users should probably use [StderrIsJournalStream].
+func StdoutIsJournalStream() (bool, error) {
+	return fdIsJournalStream(syscall.Stdout)
+}
+
+func fdIsJournalStream(fd int) (bool, error) {
+	journalStream := os.Getenv("JOURNAL_STREAM")
+	if journalStream == "" {
+		return false, nil
+	}
+
+	var expectedStat syscall.Stat_t
+	_, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
+	if err != nil {
+		return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
+	}
+
+	var stat syscall.Stat_t
+	err = syscall.Fstat(fd, &stat)
+	if err != nil {
+		return false, err
+	}
+
+	match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
+	return match, nil
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values.  Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used.  Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details.  vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+	conn := getOrInitConn()
+	if conn == nil {
+		return errors.New("could not initialize socket to journald")
+	}
+
+	socketAddr := &net.UnixAddr{
+		Name: journalSocket,
+		Net:  "unixgram",
+	}
+
+	data := new(bytes.Buffer)
+	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+	appendVariable(data, "MESSAGE", message)
+	for k, v := range vars {
+		appendVariable(data, k, v)
+	}
+
+	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
+	if err == nil {
+		return nil
+	}
+	if !isSocketSpaceError(err) {
+		return err
+	}
+
+	// Large log entry, send it via tempfile and ancillary-fd.
+	file, err := tempFd()
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	_, err = io.Copy(file, data)
+	if err != nil {
+		return err
+	}
+	rights := syscall.UnixRights(int(file.Fd()))
+	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary
+func getOrInitConn() *net.UnixConn {
+	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+	if conn != nil {
+		return conn
+	}
+	onceConn.Do(initConn)
+	return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+}
+
+func appendVariable(w io.Writer, name, value string) {
+	if err := validVarName(name); err != nil {
+		fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
+	}
+	if strings.ContainsRune(value, '\n') {
+		/* When the value contains a newline, we write:
+		 * - the variable name, followed by a newline
+		 * - the size (in 64bit little endian format)
+		 * - the data, followed by a newline
+		 */
+		fmt.Fprintln(w, name)
+		binary.Write(w, binary.LittleEndian, uint64(len(value)))
+		fmt.Fprintln(w, value)
+	} else {
+		/* just write the variable and value all on one line */
+		fmt.Fprintf(w, "%s=%s\n", name, value)
+	}
+}
+
+// validVarName validates a variable name to make sure journald will accept it.
+// The variable name must be in uppercase and consist only of characters,
+// numbers and underscores, and may not begin with an underscore:
+// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
+func validVarName(name string) error {
+	if name == "" {
+		return errors.New("Empty variable name")
+	} else if name[0] == '_' {
+		return errors.New("Variable name begins with an underscore")
+	}
+
+	for _, c := range name {
+		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
+			return errors.New("Variable name contains invalid characters")
+		}
+	}
+	return nil
+}
+
+// isSocketSpaceError checks whether the error is signaling
+// an "overlarge message" condition.
+func isSocketSpaceError(err error) bool {
+	opErr, ok := err.(*net.OpError)
+	if !ok || opErr == nil {
+		return false
+	}
+
+	sysErr, ok := opErr.Err.(*os.SyscallError)
+	if !ok || sysErr == nil {
+		return false
+	}
+
+	return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
+}
+
+// tempFd creates a temporary, unlinked file under `/dev/shm`.
+func tempFd() (*os.File, error) {
+	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+	if err != nil {
+		return nil, err
+	}
+	err = syscall.Unlink(file.Name())
+	if err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+// initConn initializes the global `unixConnPtr` socket.
+// It is automatically called when needed.
+func initConn() {
+	autobind, err := net.ResolveUnixAddr("unixgram", "")
+	if err != nil {
+		return
+	}
+
+	sock, err := net.ListenUnixgram("unixgram", autobind)
+	if err != nil {
+		return
+	}
+
+	atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
+}
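
A hedged sketch of Send and the stream-detection helpers defined above; the field name and message are illustrative.

```go
package main

import (
	"log"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	vars := map[string]string{
		"UNIT_STATE": "active", // uppercase letters, digits and underscores only
	}
	if err := journal.Send("unit became active", journal.PriNotice, vars); err != nil {
		log.Printf("journald unavailable: %v", err)
	}

	if ok, err := journal.StderrIsJournalStream(); err == nil && ok {
		// stderr already goes to journald's stream transport,
		// so plain writes to stderr land in the journal.
		log.Print("stderr is connected to the journal stream")
	}
}
```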
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
new file mode 100644
index 0000000..322e41e
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
@@ -0,0 +1,43 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"errors"
+)
+
+func Enabled() bool {
+	return false
+}
+
+func Send(message string, priority Priority, vars map[string]string) error {
+	return errors.New("could not initialize socket to journald")
+}
+
+func StderrIsJournalStream() (bool, error) {
+	return false, nil
+}
+
+func StdoutIsJournalStream() (bool, error) {
+	return false, nil
+}
diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
deleted file mode 100644
index e06d208..0000000
--- a/vendor/github.com/coreos/pkg/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
deleted file mode 100644
index b39ddfa..0000000
--- a/vendor/github.com/coreos/pkg/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md
deleted file mode 100644
index f79dbfc..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# capnslog, the CoreOS logging package
-
-There are far too many logging packages out there, with varying licenses, far too many features (colorization, all sorts of log frameworks), or interfaces that are just a pain to use (lack of `Fatalln()`?).
-capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
-
-### Design Principles
-
-##### `package main` is the place where logging gets turned on and routed
-
-A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
-
-##### All log options are runtime-configurable. 
-
-It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
-
-##### There is one log object per package. It is registered under its repository and package name.
-
-`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
-
-##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
-
-Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
-
-Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
-
-##### Log objects are an interface
-
-An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
-
-##### Log levels have specific meanings:
-
-  * Critical: Unrecoverable. Must fail.
-  * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
-  * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
-  * Notice: Normal, but important (uncommon) log information.
-  * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
-  * Debug: Everything is still fine, but even common operations may be logged, along with notices that are less helpful but more numerous.
-  * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
-
diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go
deleted file mode 100644
index b305a84..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/formatters.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"log"
-	"runtime"
-	"strings"
-	"time"
-)
-
-type Formatter interface {
-	Format(pkg string, level LogLevel, depth int, entries ...interface{})
-	Flush()
-}
-
-func NewStringFormatter(w io.Writer) Formatter {
-	return &StringFormatter{
-		w: bufio.NewWriter(w),
-	}
-}
-
-type StringFormatter struct {
-	w *bufio.Writer
-}
-
-func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
-	now := time.Now().UTC()
-	s.w.WriteString(now.Format(time.RFC3339))
-	s.w.WriteByte(' ')
-	writeEntries(s.w, pkg, l, i, entries...)
-	s.Flush()
-}
-
-func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
-	if pkg != "" {
-		w.WriteString(pkg + ": ")
-	}
-	str := fmt.Sprint(entries...)
-	endsInNL := strings.HasSuffix(str, "\n")
-	w.WriteString(str)
-	if !endsInNL {
-		w.WriteString("\n")
-	}
-}
-
-func (s *StringFormatter) Flush() {
-	s.w.Flush()
-}
-
-func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
-	return &PrettyFormatter{
-		w:     bufio.NewWriter(w),
-		debug: debug,
-	}
-}
-
-type PrettyFormatter struct {
-	w     *bufio.Writer
-	debug bool
-}
-
-func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
-	now := time.Now()
-	ts := now.Format("2006-01-02 15:04:05")
-	c.w.WriteString(ts)
-	ms := now.Nanosecond() / 1000
-	c.w.WriteString(fmt.Sprintf(".%06d", ms))
-	if c.debug {
-		_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
-		if !ok {
-			file = "???"
-			line = 1
-		} else {
-			slash := strings.LastIndex(file, "/")
-			if slash >= 0 {
-				file = file[slash+1:]
-			}
-		}
-		if line < 0 {
-			line = 0 // not a real line number
-		}
-		c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
-	}
-	c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
-	writeEntries(c.w, pkg, l, depth, entries...)
-	c.Flush()
-}
-
-func (c *PrettyFormatter) Flush() {
-	c.w.Flush()
-}
-
-// LogFormatter emulates the form of the traditional built-in logger.
-type LogFormatter struct {
-	logger *log.Logger
-	prefix string
-}
-
-// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
-// standard library log package to actually do the logging work so that logs look similar.
-func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
-	return &LogFormatter{
-		logger: log.New(w, "", flag), // don't use prefix here
-		prefix: prefix,               // save it instead
-	}
-}
-
-// Format builds a log message for the LogFormatter. The LogLevel is ignored.
-func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
-	str := fmt.Sprint(entries...)
-	prefix := lf.prefix
-	if pkg != "" {
-		prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
-	}
-	lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
-}
-
-// Flush is included so that the interface is complete, but is a no-op.
-func (lf *LogFormatter) Flush() {
-	// noop
-}
-
-// NilFormatter is a no-op log formatter that does nothing.
-type NilFormatter struct {
-}
-
-// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no
-// messages so that you can cause part of your logging to be silent.
-func NewNilFormatter() Formatter {
-	return &NilFormatter{}
-}
-
-// Format does nothing.
-func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
-	// noop
-}
-
-// Flush is included so that the interface is complete, but is a no-op.
-func (_ *NilFormatter) Flush() {
-	// noop
-}
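
The formatters removed above include LogFormatter, which delegates to the standard library log package so output matches traditional logs. A minimal sketch of wiring it up through the deleted API; the repo path, package name, and prefix below are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/coreos/pkg/capnslog"
)

func main() {
	// Stdlib-backed formatter: lines carry the usual log.LstdFlags header
	// (date and time) followed by the given prefix and package name.
	capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stdout, "myapp ", log.LstdFlags))

	plog := capnslog.NewPackageLogger("github.com/example/myrepo", "main")
	plog.Info("formatted via the standard library log package")
}
```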
diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
deleted file mode 100644
index 426603e..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"bufio"
-	"bytes"
-	"io"
-	"os"
-	"runtime"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var pid = os.Getpid()
-
-type GlogFormatter struct {
-	StringFormatter
-}
-
-func NewGlogFormatter(w io.Writer) *GlogFormatter {
-	g := &GlogFormatter{}
-	g.w = bufio.NewWriter(w)
-	return g
-}
-
-func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
-	g.w.Write(GlogHeader(level, depth+1))
-	g.StringFormatter.Format(pkg, level, depth+1, entries...)
-}
-
-func GlogHeader(level LogLevel, depth int) []byte {
-	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
-	now := time.Now().UTC()
-	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
-	if !ok {
-		file = "???"
-		line = 1
-	} else {
-		slash := strings.LastIndex(file, "/")
-		if slash >= 0 {
-			file = file[slash+1:]
-		}
-	}
-	if line < 0 {
-		line = 0 // not a real line number
-	}
-	buf := &bytes.Buffer{}
-	buf.Grow(30)
-	_, month, day := now.Date()
-	hour, minute, second := now.Clock()
-	buf.WriteString(level.Char())
-	twoDigits(buf, int(month))
-	twoDigits(buf, day)
-	buf.WriteByte(' ')
-	twoDigits(buf, hour)
-	buf.WriteByte(':')
-	twoDigits(buf, minute)
-	buf.WriteByte(':')
-	twoDigits(buf, second)
-	buf.WriteByte('.')
-	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
-	buf.WriteByte('Z')
-	buf.WriteByte(' ')
-	buf.WriteString(strconv.Itoa(pid))
-	buf.WriteByte(' ')
-	buf.WriteString(file)
-	buf.WriteByte(':')
-	buf.WriteString(strconv.Itoa(line))
-	buf.WriteByte(']')
-	buf.WriteByte(' ')
-	return buf.Bytes()
-}
-
-const digits = "0123456789"
-
-func twoDigits(b *bytes.Buffer, d int) {
-	c2 := digits[d%10]
-	d /= 10
-	c1 := digits[d%10]
-	b.WriteByte(c1)
-	b.WriteByte(c2)
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
deleted file mode 100644
index 38ce6d2..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/init.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"io"
-	"os"
-	"syscall"
-)
-
-// Here's where the opinionation comes in. We need some sensible defaults,
-// especially after taking over the log package. Your project (whatever it may
-// be) may see things differently. That's okay; there should be no defaults in
-// the main package that cannot be controlled or overridden programatically,
-// otherwise it's a bug. Doing so is creating your own init_log.go file much
-// like this one.
-
-func init() {
-	initHijack()
-
-	// Go `log` package uses os.Stderr.
-	SetFormatter(NewDefaultFormatter(os.Stderr))
-	SetGlobalLogLevel(INFO)
-}
-
-func NewDefaultFormatter(out io.Writer) Formatter {
-	if syscall.Getppid() == 1 {
-		// We're running under init, which may be systemd.
-		f, err := NewJournaldFormatter()
-		if err == nil {
-			return f
-		}
-	}
-	return NewPrettyFormatter(out, false)
-}
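
The comment in this deleted init file stresses that the defaults it installs (pretty formatter on stderr, INFO level) must stay overridable. A small sketch of doing exactly that with the removed API; choosing the glog-style formatter and DEBUG level here is purely illustrative:

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

func main() {
	// Replace the pretty formatter installed by the deleted init() and raise
	// the global level from the default INFO to DEBUG for all packages.
	capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr))
	capnslog.SetGlobalLogLevel(capnslog.DEBUG)
}
```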
diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
deleted file mode 100644
index 4553050..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/init_windows.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import "os"
-
-func init() {
-	initHijack()
-
-	// Go `log` package uses os.Stderr.
-	SetFormatter(NewPrettyFormatter(os.Stderr, false))
-	SetGlobalLogLevel(INFO)
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
deleted file mode 100644
index 72e0520..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"github.com/coreos/go-systemd/journal"
-)
-
-func NewJournaldFormatter() (Formatter, error) {
-	if !journal.Enabled() {
-		return nil, errors.New("No systemd detected")
-	}
-	return &journaldFormatter{}, nil
-}
-
-type journaldFormatter struct{}
-
-func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
-	var pri journal.Priority
-	switch l {
-	case CRITICAL:
-		pri = journal.PriCrit
-	case ERROR:
-		pri = journal.PriErr
-	case WARNING:
-		pri = journal.PriWarning
-	case NOTICE:
-		pri = journal.PriNotice
-	case INFO:
-		pri = journal.PriInfo
-	case DEBUG:
-		pri = journal.PriDebug
-	case TRACE:
-		pri = journal.PriDebug
-	default:
-		panic("Unhandled loglevel")
-	}
-	msg := fmt.Sprint(entries...)
-	tags := map[string]string{
-		"PACKAGE":           pkg,
-		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
-	}
-	err := journal.Send(msg, pri, tags)
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-	}
-}
-
-func (j *journaldFormatter) Flush() {}
diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
deleted file mode 100644
index 970086b..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"log"
-)
-
-func initHijack() {
-	pkg := NewPackageLogger("log", "")
-	w := packageWriter{pkg}
-	log.SetFlags(0)
-	log.SetPrefix("")
-	log.SetOutput(w)
-}
-
-type packageWriter struct {
-	pl *PackageLogger
-}
-
-func (p packageWriter) Write(b []byte) (int, error) {
-	if p.pl.level < INFO {
-		return 0, nil
-	}
-	p.pl.internalLog(calldepth+2, INFO, string(b))
-	return len(b), nil
-}
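
initHijack in the deleted file is what redirected the standard library logger into capnslog: it clears log's flags and prefix and points its output at a PackageLogger named "log". A hedged sketch of the observable effect, assuming a non-Windows build where the package's init() runs:

```go
package main

import (
	"log"

	// Importing for side effects: init() calls initHijack(), which takes over
	// the standard library logger on non-Windows builds.
	_ "github.com/coreos/pkg/capnslog"
)

func main() {
	// This no longer prints with stdlib formatting; it is forwarded to
	// capnslog's "log" package logger at INFO level.
	log.Println("captured by capnslog")
}
```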
diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go
deleted file mode 100644
index 226b60c..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/logmap.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"errors"
-	"strings"
-	"sync"
-)
-
-// LogLevel is the set of all log levels.
-type LogLevel int8
-
-const (
-	// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
-	CRITICAL LogLevel = iota - 1
-	// ERROR is for errors that are not fatal but lead to troubling behavior.
-	ERROR
-	// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
-	WARNING
-	// NOTICE is for normal but significant conditions.
-	NOTICE
-	// INFO is a log level for common, everyday log updates.
-	INFO
-	// DEBUG is the default hidden level for more verbose updates about internal processes.
-	DEBUG
-	// TRACE is for (potentially) call by call tracing of programs.
-	TRACE
-)
-
-// Char returns a single-character representation of the log level.
-func (l LogLevel) Char() string {
-	switch l {
-	case CRITICAL:
-		return "C"
-	case ERROR:
-		return "E"
-	case WARNING:
-		return "W"
-	case NOTICE:
-		return "N"
-	case INFO:
-		return "I"
-	case DEBUG:
-		return "D"
-	case TRACE:
-		return "T"
-	default:
-		panic("Unhandled loglevel")
-	}
-}
-
-// String returns a multi-character representation of the log level.
-func (l LogLevel) String() string {
-	switch l {
-	case CRITICAL:
-		return "CRITICAL"
-	case ERROR:
-		return "ERROR"
-	case WARNING:
-		return "WARNING"
-	case NOTICE:
-		return "NOTICE"
-	case INFO:
-		return "INFO"
-	case DEBUG:
-		return "DEBUG"
-	case TRACE:
-		return "TRACE"
-	default:
-		panic("Unhandled loglevel")
-	}
-}
-
-// Update using the given string value. Fulfills the flag.Value interface.
-func (l *LogLevel) Set(s string) error {
-	value, err := ParseLevel(s)
-	if err != nil {
-		return err
-	}
-
-	*l = value
-	return nil
-}
-
-// Returns an empty string, only here to fulfill the pflag.Value interface.
-func (l *LogLevel) Type() string {
-	return ""
-}
-
-// ParseLevel translates some potential loglevel strings into their corresponding levels.
-func ParseLevel(s string) (LogLevel, error) {
-	switch s {
-	case "CRITICAL", "C":
-		return CRITICAL, nil
-	case "ERROR", "0", "E":
-		return ERROR, nil
-	case "WARNING", "1", "W":
-		return WARNING, nil
-	case "NOTICE", "2", "N":
-		return NOTICE, nil
-	case "INFO", "3", "I":
-		return INFO, nil
-	case "DEBUG", "4", "D":
-		return DEBUG, nil
-	case "TRACE", "5", "T":
-		return TRACE, nil
-	}
-	return CRITICAL, errors.New("couldn't parse log level " + s)
-}
-
-type RepoLogger map[string]*PackageLogger
-
-type loggerStruct struct {
-	sync.Mutex
-	repoMap   map[string]RepoLogger
-	formatter Formatter
-}
-
-// logger is the global logger
-var logger = new(loggerStruct)
-
-// SetGlobalLogLevel sets the log level for all packages in all repositories
-// registered with capnslog.
-func SetGlobalLogLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	for _, r := range logger.repoMap {
-		r.setRepoLogLevelInternal(l)
-	}
-}
-
-// GetRepoLogger may return the handle to the repository's set of packages' loggers.
-func GetRepoLogger(repo string) (RepoLogger, error) {
-	logger.Lock()
-	defer logger.Unlock()
-	r, ok := logger.repoMap[repo]
-	if !ok {
-		return nil, errors.New("no packages registered for repo " + repo)
-	}
-	return r, nil
-}
-
-// MustRepoLogger returns the handle to the repository's packages' loggers.
-func MustRepoLogger(repo string) RepoLogger {
-	r, err := GetRepoLogger(repo)
-	if err != nil {
-		panic(err)
-	}
-	return r
-}
-
-// SetRepoLogLevel sets the log level for all packages in the repository.
-func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	r.setRepoLogLevelInternal(l)
-}
-
-func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
-	for _, v := range r {
-		v.level = l
-	}
-}
-
-// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
-// order, and returns a map of the results, for use in SetLogLevel.
-func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
-	setlist := strings.Split(conf, ",")
-	out := make(map[string]LogLevel)
-	for _, setstring := range setlist {
-		setting := strings.Split(setstring, "=")
-		if len(setting) != 2 {
-			return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
-		}
-		l, err := ParseLevel(setting[1])
-		if err != nil {
-			return nil, err
-		}
-		out[setting[0]] = l
-	}
-	return out, nil
-}
-
-// SetLogLevel takes a map of package names within a repository to their desired
-// loglevel, and sets the levels appropriately. Unknown packages are ignored.
-// "*" is a special package name that corresponds to all packages, and will be
-// processed first.
-func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	if l, ok := m["*"]; ok {
-		r.setRepoLogLevelInternal(l)
-	}
-	for k, v := range m {
-		l, ok := r[k]
-		if !ok {
-			continue
-		}
-		l.level = v
-	}
-}
-
-// SetFormatter sets the formatting function for all logs.
-func SetFormatter(f Formatter) {
-	logger.Lock()
-	defer logger.Unlock()
-	logger.formatter = f
-}
-
-// NewPackageLogger creates a package logger object.
-// This should be defined as a global var in your package, referencing your repo.
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
-	logger.Lock()
-	defer logger.Unlock()
-	if logger.repoMap == nil {
-		logger.repoMap = make(map[string]RepoLogger)
-	}
-	r, rok := logger.repoMap[repo]
-	if !rok {
-		logger.repoMap[repo] = make(RepoLogger)
-		r = logger.repoMap[repo]
-	}
-	p, pok := r[pkg]
-	if !pok {
-		r[pkg] = &PackageLogger{
-			pkg:   pkg,
-			level: INFO,
-		}
-		p = r[pkg]
-	}
-	return
-}
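
The deleted logmap.go documents per-package level control: ParseLogLevelConfig turns a comma-separated "pkg=level" string into a map, and SetLogLevel applies it, with "*" handled first as a catch-all. A sketch of that flow using the removed API; the repository path and package name are placeholders:

```go
package main

import (
	"fmt"

	"github.com/coreos/pkg/capnslog"
)

// Declared once per package, as the removed NewPackageLogger docs suggest.
var plog = capnslog.NewPackageLogger("github.com/example/myrepo", "storage")

func main() {
	repo := capnslog.MustRepoLogger("github.com/example/myrepo")

	// "*" sets every package first; explicit entries then override it.
	levels, err := repo.ParseLogLevelConfig("*=INFO,storage=DEBUG")
	if err != nil {
		fmt.Println("bad log config:", err)
		return
	}
	repo.SetLogLevel(levels)

	plog.Debug("storage package now logs at DEBUG")
}
```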
diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
deleted file mode 100644
index 00ff371..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"fmt"
-	"os"
-)
-
-type PackageLogger struct {
-	pkg   string
-	level LogLevel
-}
-
-const calldepth = 2
-
-func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
-	logger.Lock()
-	defer logger.Unlock()
-	if inLevel != CRITICAL && p.level < inLevel {
-		return
-	}
-	if logger.formatter != nil {
-		logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
-	}
-}
-
-// SetLevel allows users to change the current logging level.
-func (p *PackageLogger) SetLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	p.level = l
-}
-
-// LevelAt checks if the given log level will be outputted under current setting.
-func (p *PackageLogger) LevelAt(l LogLevel) bool {
-	logger.Lock()
-	defer logger.Unlock()
-	return p.level >= l
-}
-
-// Log a formatted string at any level between ERROR and TRACE
-func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
-	p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
-}
-
-// Log a message at any level between ERROR and TRACE
-func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
-	p.internalLog(calldepth, l, fmt.Sprint(args...))
-}
-
-// log stdlib compatibility
-
-func (p *PackageLogger) Println(args ...interface{}) {
-	p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
-}
-
-func (p *PackageLogger) Printf(format string, args ...interface{}) {
-	p.Logf(INFO, format, args...)
-}
-
-func (p *PackageLogger) Print(args ...interface{}) {
-	p.internalLog(calldepth, INFO, fmt.Sprint(args...))
-}
-
-// Panic and fatal
-
-func (p *PackageLogger) Panicf(format string, args ...interface{}) {
-	s := fmt.Sprintf(format, args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Panic(args ...interface{}) {
-	s := fmt.Sprint(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Panicln(args ...interface{}) {
-	s := fmt.Sprintln(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
-	p.Logf(CRITICAL, format, args...)
-	os.Exit(1)
-}
-
-func (p *PackageLogger) Fatal(args ...interface{}) {
-	s := fmt.Sprint(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	os.Exit(1)
-}
-
-func (p *PackageLogger) Fatalln(args ...interface{}) {
-	s := fmt.Sprintln(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	os.Exit(1)
-}
-
-// Error Functions
-
-func (p *PackageLogger) Errorf(format string, args ...interface{}) {
-	p.Logf(ERROR, format, args...)
-}
-
-func (p *PackageLogger) Error(entries ...interface{}) {
-	p.internalLog(calldepth, ERROR, entries...)
-}
-
-// Warning Functions
-
-func (p *PackageLogger) Warningf(format string, args ...interface{}) {
-	p.Logf(WARNING, format, args...)
-}
-
-func (p *PackageLogger) Warning(entries ...interface{}) {
-	p.internalLog(calldepth, WARNING, entries...)
-}
-
-// Notice Functions
-
-func (p *PackageLogger) Noticef(format string, args ...interface{}) {
-	p.Logf(NOTICE, format, args...)
-}
-
-func (p *PackageLogger) Notice(entries ...interface{}) {
-	p.internalLog(calldepth, NOTICE, entries...)
-}
-
-// Info Functions
-
-func (p *PackageLogger) Infof(format string, args ...interface{}) {
-	p.Logf(INFO, format, args...)
-}
-
-func (p *PackageLogger) Info(entries ...interface{}) {
-	p.internalLog(calldepth, INFO, entries...)
-}
-
-// Debug Functions
-
-func (p *PackageLogger) Debugf(format string, args ...interface{}) {
-	if p.level < DEBUG {
-		return
-	}
-	p.Logf(DEBUG, format, args...)
-}
-
-func (p *PackageLogger) Debug(entries ...interface{}) {
-	if p.level < DEBUG {
-		return
-	}
-	p.internalLog(calldepth, DEBUG, entries...)
-}
-
-// Trace Functions
-
-func (p *PackageLogger) Tracef(format string, args ...interface{}) {
-	if p.level < TRACE {
-		return
-	}
-	p.Logf(TRACE, format, args...)
-}
-
-func (p *PackageLogger) Trace(entries ...interface{}) {
-	if p.level < TRACE {
-		return
-	}
-	p.internalLog(calldepth, TRACE, entries...)
-}
-
-func (p *PackageLogger) Flush() {
-	logger.Lock()
-	defer logger.Unlock()
-	logger.formatter.Flush()
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
deleted file mode 100644
index 4be5a1f..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"fmt"
-	"log/syslog"
-)
-
-func NewSyslogFormatter(w *syslog.Writer) Formatter {
-	return &syslogFormatter{w}
-}
-
-func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
-	w, err := syslog.New(syslog.LOG_DEBUG, tag)
-	if err != nil {
-		return nil, err
-	}
-	return NewSyslogFormatter(w), nil
-}
-
-type syslogFormatter struct {
-	w *syslog.Writer
-}
-
-func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
-	for _, entry := range entries {
-		str := fmt.Sprint(entry)
-		switch l {
-		case CRITICAL:
-			s.w.Crit(str)
-		case ERROR:
-			s.w.Err(str)
-		case WARNING:
-			s.w.Warning(str)
-		case NOTICE:
-			s.w.Notice(str)
-		case INFO:
-			s.w.Info(str)
-		case DEBUG:
-			s.w.Debug(str)
-		case TRACE:
-			s.w.Debug(str)
-		default:
-			panic("Unhandled loglevel")
-		}
-	}
-}
-
-func (s *syslogFormatter) Flush() {
-}
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
index 2d1b3d9..76f5007 100644
--- a/vendor/github.com/eapache/go-resiliency/breaker/README.md
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -1,7 +1,7 @@
 circuit-breaker
 ===============
 
-[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency)
+[![Golang CI](https://github.com/eapache/go-resiliency/actions/workflows/golang-ci.yml/badge.svg)](https://github.com/eapache/go-resiliency/actions/workflows/golang-ci.yml)
 [![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker)
 [![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
 
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
index f88ca72..9214386 100644
--- a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -12,10 +12,13 @@
 // because the breaker is currently open.
 var ErrBreakerOpen = errors.New("circuit breaker is open")
 
+// State is a type representing the possible states of a circuit breaker.
+type State uint32
+
 const (
-	closed uint32 = iota
-	open
-	halfOpen
+	Closed State = iota
+	Open
+	HalfOpen
 )
 
 // Breaker implements the circuit-breaker resiliency pattern
@@ -24,7 +27,7 @@
 	timeout                          time.Duration
 
 	lock              sync.Mutex
-	state             uint32
+	state             State
 	errors, successes int
 	lastError         time.Time
 }
@@ -46,9 +49,9 @@
 // already open, or it will run the given function and pass along its return
 // value. It is safe to call Run concurrently on the same Breaker.
 func (b *Breaker) Run(work func() error) error {
-	state := atomic.LoadUint32(&b.state)
+	state := b.GetState()
 
-	if state == open {
+	if state == Open {
 		return ErrBreakerOpen
 	}
 
@@ -61,9 +64,9 @@
 // the return value of the function. It is safe to call Go concurrently on the
 // same Breaker.
 func (b *Breaker) Go(work func() error) error {
-	state := atomic.LoadUint32(&b.state)
+	state := b.GetState()
 
-	if state == open {
+	if state == Open {
 		return ErrBreakerOpen
 	}
 
@@ -75,7 +78,13 @@
 	return nil
 }
 
-func (b *Breaker) doWork(state uint32, work func() error) error {
+// GetState returns the current State of the circuit-breaker at the moment
+// that it is called.
+func (b *Breaker) GetState() State {
+	return (State)(atomic.LoadUint32((*uint32)(&b.state)))
+}
+
+func (b *Breaker) doWork(state State, work func() error) error {
 	var panicValue interface{}
 
 	result := func() error {
@@ -85,7 +94,7 @@
 		return work()
 	}()
 
-	if result == nil && panicValue == nil && state == closed {
+	if result == nil && panicValue == nil && state == Closed {
 		// short-circuit the normal, success path without contending
 		// on the lock
 		return nil
@@ -108,7 +117,7 @@
 	defer b.lock.Unlock()
 
 	if result == nil && panicValue == nil {
-		if b.state == halfOpen {
+		if b.state == HalfOpen {
 			b.successes++
 			if b.successes == b.successThreshold {
 				b.closeBreaker()
@@ -123,26 +132,26 @@
 		}
 
 		switch b.state {
-		case closed:
+		case Closed:
 			b.errors++
 			if b.errors == b.errorThreshold {
 				b.openBreaker()
 			} else {
 				b.lastError = time.Now()
 			}
-		case halfOpen:
+		case HalfOpen:
 			b.openBreaker()
 		}
 	}
 }
 
 func (b *Breaker) openBreaker() {
-	b.changeState(open)
+	b.changeState(Open)
 	go b.timer()
 }
 
 func (b *Breaker) closeBreaker() {
-	b.changeState(closed)
+	b.changeState(Closed)
 }
 
 func (b *Breaker) timer() {
@@ -151,11 +160,11 @@
 	b.lock.Lock()
 	defer b.lock.Unlock()
 
-	b.changeState(halfOpen)
+	b.changeState(HalfOpen)
 }
 
-func (b *Breaker) changeState(newState uint32) {
+func (b *Breaker) changeState(newState State) {
 	b.errors = 0
 	b.successes = 0
-	atomic.StoreUint32(&b.state, newState)
+	atomic.StoreUint32((*uint32)(&b.state), (uint32)(newState))
 }
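
This change exports the breaker states as Closed, Open, and HalfOpen and adds GetState, so callers can inspect a breaker without submitting work. A hedged usage sketch; the New(errorThreshold, successThreshold, timeout) constructor is assumed from the fields shown in this diff rather than quoted from it:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	// Assumed constructor: trip after 3 consecutive errors, close again after
	// 1 success while half-open, and wait 5s before probing.
	b := breaker.New(3, 1, 5*time.Second)

	err := b.Run(func() error {
		return errors.New("downstream unavailable")
	})
	if errors.Is(err, breaker.ErrBreakerOpen) {
		fmt.Println("request skipped: breaker is open")
	}

	// New in this version: observe the state without submitting work.
	if b.GetState() == breaker.Open {
		fmt.Println("breaker is currently open")
	}
}
```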
diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
deleted file mode 100644
index 6a46f47..0000000
--- a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build gofuzz
-
-package snappy
-
-func Fuzz(data []byte) int {
-	decode, err := Decode(data)
-	if decode == nil && err == nil {
-		panic("nil error with nil result")
-	}
-
-	if err != nil {
-		return 0
-	}
-
-	return 1
-}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
index ea8f7af..c2eb205 100644
--- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -25,10 +25,10 @@
 )
 
 func min(x, y int) int {
-    if x < y {
-        return x
-    }
-    return y
+	if x < y {
+		return x
+	}
+	return y
 }
 
 // Encode encodes data as snappy with no framing header.
@@ -48,14 +48,14 @@
 
 	// Snappy encode in blocks of maximum 32KB
 	var (
-		max = len(src)
+		max       = len(src)
 		blockSize = 32 * 1024
-		pos   = 0
-		chunk []byte
+		pos       = 0
+		chunk     []byte
 	)
 
 	for pos < max {
-		newPos := min(pos + blockSize, max)
+		newPos := min(pos+blockSize, max)
 		chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos])
 
 		// First encode the compressed size (big-endian)
@@ -83,13 +83,23 @@
 // for use by this function. If `dst` is nil *or* insufficiently large to hold
 // the decoded `src`, new space will be allocated.
 func DecodeInto(dst, src []byte) ([]byte, error) {
+	if len(src) < 8 || !bytes.Equal(src[:8], xerialHeader) {
+		dst, err := master.Decode(dst[:cap(dst)], src)
+		if err != nil && len(src) < len(xerialHeader) {
+			// Keep compatibility and return ErrMalformed when there is a
+			// short or truncated header.
+			return nil, ErrMalformed
+		}
+		return dst, err
+	}
+
 	var max = len(src)
 	if max < len(xerialHeader) {
 		return nil, ErrMalformed
 	}
 
-	if !bytes.Equal(src[:8], xerialHeader) {
-		return master.Decode(dst[:cap(dst)], src)
+	if max == sizeOffset {
+		return []byte{}, nil
 	}
 
 	if max < sizeOffset+sizeBytes {
@@ -104,7 +114,7 @@
 	var (
 		pos   = sizeOffset
 		chunk []byte
-		err       error
+		err   error
 	)
 
 	for pos+sizeBytes <= max {
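
The reworked DecodeInto handles raw (non-xerial-framed) snappy input up front while still returning ErrMalformed for short or truncated headers, and returns an empty slice for a header-only stream. A round-trip sketch under the assumption that Encode produces unframed snappy (as its doc comment says), which the new fallback path decodes:

```go
package main

import (
	"bytes"
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	src := []byte("payload that will be snappy-encoded")

	// Encode produces plain snappy without the xerial framing header, so the
	// raw-snappy fallback added in DecodeInto handles it.
	encoded := snappy.Encode(src)

	// Reuse a destination buffer; DecodeInto only allocates if it is too small.
	buf := make([]byte, 0, len(src))
	decoded, err := snappy.DecodeInto(buf, encoded)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("round-trip ok:", bytes.Equal(src, decoded))
}
```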
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
index 1e8d238..de51455 100644
--- a/vendor/github.com/go-redis/redis/v8/.golangci.yml
+++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml
@@ -2,20 +2,3 @@
   concurrency: 8
   deadline: 5m
   tests: false
-linters:
-  enable-all: true
-  disable:
-    - funlen
-    - gochecknoglobals
-    - gochecknoinits
-    - gocognit
-    - goconst
-    - godox
-    - gosec
-    - maligned
-    - wsl
-    - gomnd
-    - goerr113
-    - exhaustive
-    - nestif
-    - nlreturn
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/.prettierrc
rename to vendor/github.com/go-redis/redis/v8/.prettierrc.yml
diff --git a/vendor/github.com/go-redis/redis/v8/.travis.yml b/vendor/github.com/go-redis/redis/v8/.travis.yml
deleted file mode 100644
index 1bf578d..0000000
--- a/vendor/github.com/go-redis/redis/v8/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-dist: xenial
-language: go
-
-services:
-  - redis-server
-
-go:
-  - 1.14.x
-  - 1.15.x
-  - tip
-
-matrix:
-  allow_failures:
-    - go: tip
-
-go_import_path: github.com/go-redis/redis
-
-before_install:
-  - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s --
-    -b $(go env GOPATH)/bin v1.31.0
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
index 8392d54..195e519 100644
--- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
+++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
@@ -1,5 +1,177 @@
-# Changelog
+## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
 
-> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
 
-See https://redis.uptrace.dev/changelog/
+### Bug Fixes
+
+* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
+* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
+* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
+* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
+* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
+* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
+* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
+* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
+* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
+
+
+### Features
+
+* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
+* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
+* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
+* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
+* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
+* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
+* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
+
+
+
+## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
+
+
+### Features
+
+* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
+* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
+* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
+
+
+
+## v8.11
+
+- Remove OpenTelemetry metrics.
+- Supports more redis commands and options.
+
+## v8.10
+
+- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
+  single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
+  decision:
+
+  - Traces become smaller and less noisy.
+  - It may be costly to process those 3 extra spans for each query.
+  - go-redis no longer depends on OpenTelemetry.
+
+  Eventually we hope to replace the information that we no longer collect with OpenTelemetry
+  Metrics.
+
+## v8.9
+
+- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
+  `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
+
+## v8.8
+
+- To make updating easier, extra modules now have the same version as go-redis does. That means that
+  you need to update your imports:
+
+```
+github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
+github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
+```
+
+## v8.5
+
+- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
+  struct:
+
+```go
+err := rdb.HGetAll(ctx, "hash").Scan(&data)
+
+err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
+```
+
+- Please check [redismock](https://github.com/go-redis/redismock) by
+  [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
+
+## v8
+
+- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
+  using `context.Context` yet, the simplest option is to define global package variable
+  `var ctx = context.TODO()` and use it when `ctx` is required.
+
+- Full support for `context.Context` canceling.
+
+- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
+
+- Added `redisext.OpenTemetryHook` that adds
+  [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
+
+- Redis slow log support.
+
+- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
+  existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
+
+```go
+import "github.com/golang/groupcache/consistenthash"
+
+ring := redis.NewRing(&redis.RingOptions{
+    NewConsistentHash: func() {
+        return consistenthash.New(100, crc32.ChecksumIEEE)
+    },
+})
+```
+
+- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
+- `Options.MaxRetries` default value is changed from 0 to 3.
+
+- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
+
+## v7.3
+
+- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection
+  URL contains username.
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
+  interface.
+
+## v7
+
+- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
+  transactional pipeline.
+- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
+- WithContext now can not be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error
+  when the context is cancelled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
+  detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
+  the time.
+- `SetLimiter` is removed and added `Options.Limiter` instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## 6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
+  `HashReplicas = 1000` for better keys distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout
+  occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal Redis
+  Servers that don't have cluster mode enabled. See
+  https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
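
The v8 notes above make context.Context mandatory on every command and suggest a package-level ctx as the simplest migration. A minimal sketch of that pattern against the v8 import path; the address is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

var ctx = context.Background()

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Every command now takes ctx as its first argument.
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
	val, err := rdb.Get(ctx, "key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("key =", val)
}
```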
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
index 49e4c96..a4cfe05 100644
--- a/vendor/github.com/go-redis/redis/v8/Makefile
+++ b/vendor/github.com/go-redis/redis/v8/Makefile
@@ -1,10 +1,11 @@
-all: testdeps
+PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
 	go test ./...
 	go test ./... -short -race
 	go test ./... -run=NONE -bench=. -benchmem
 	env GOOS=linux GOARCH=386 go test ./...
 	go vet
-	golangci-lint run
 
 testdeps: testdata/redis/src/redis-server
 
@@ -15,7 +16,20 @@
 
 testdata/redis:
 	mkdir -p $@
-	wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@
+	wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
 
 testdata/redis/src/redis-server: testdata/redis
 	cd $< && make all
+
+fmt:
+	gofmt -w -s ./
+	goimports -w  -local github.com/go-redis/redis ./
+
+go_mod_tidy:
+	go get -u && go mod tidy
+	set -e; for dir in $(PACKAGE_DIRS); do \
+	  echo "go mod tidy in $${dir}"; \
+	  (cd "$${dir}" && \
+	    go get -u && \
+	    go mod tidy); \
+	done
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
index da5d0fb..f3b6a01 100644
--- a/vendor/github.com/go-redis/redis/v8/README.md
+++ b/vendor/github.com/go-redis/redis/v8/README.md
@@ -1,23 +1,32 @@
-# Redis client for Golang
+# Redis client for Go
 
-[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
 [![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
 [![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
-[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
 
-> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
+OpenTelemetry and ClickHouse. Give it a star as well!
 
-- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
+## Resources
+
+- [Discussions](https://github.com/go-redis/redis/discussions)
 - [Documentation](https://redis.uptrace.dev)
 - [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
 - [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
 - [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
 
+Other projects you may like:
+
+- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
+- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
+
 ## Ecosystem
 
-- [Distributed Locks](https://github.com/bsm/redislock).
-- [Redis Cache](https://github.com/go-redis/cache).
-- [Rate limiting](https://github.com/go-redis/redis_rate).
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
 
 ## Features
 
@@ -26,16 +35,16 @@
   [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
 - [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
 - [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and
-  [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
+  [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
 - [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
 - [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
 - [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
 - [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
-- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup)
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
   without using cluster mode and Redis Sentinel.
 - [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
 
 ## Installation
 
@@ -47,7 +56,7 @@
 go mod init github.com/my/repo
 ```
 
-And then install go-redis (note _v8_ in the import; omitting it is a popular mistake):
+And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
 
 ```shell
 go get github.com/go-redis/redis/v8
@@ -59,6 +68,7 @@
 import (
     "context"
     "github.com/go-redis/redis/v8"
+    "fmt"
 )
 
 var ctx = context.Background()
@@ -129,9 +139,37 @@
 res, err := rdb.Do(ctx, "set", "key", "value").Result()
 ```
 
-## See also
+## Run the test
 
-- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
-- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
-- [Golang msgpack](https://github.com/vmihailenco/msgpack)
-- [Golang message task queue](https://github.com/vmihailenco/taskq)
+go-redis will start a redis-server and run the test cases.
+
+The paths of redis-server bin file and redis config file are defined in `main_test.go`:
+
+```
+var (
+	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
+
+```
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```
+go test
+```
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+<a href="https://github.com/go-redis/redis/graphs/contributors">
+  <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
+</a>
diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/go-redis/redis/v8/RELEASING.md
new file mode 100644
index 0000000..1115db4
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/RELEASING.md
@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. Merge the pull request and run `tag.sh` to create tags for packages:
+
+```shell
+TAG=v1.0.0 ./scripts/tag.sh
+```
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
index a6ce5c5..a54f2f3 100644
--- a/vendor/github.com/go-redis/redis/v8/cluster.go
+++ b/vendor/github.com/go-redis/redis/v8/cluster.go
@@ -68,6 +68,9 @@
 	ReadTimeout  time.Duration
 	WriteTimeout time.Duration
 
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
 	// PoolSize applies per cluster node and not for the whole cluster.
 	PoolSize           int
 	MinIdleConns       int
@@ -86,12 +89,12 @@
 		opt.MaxRedirects = 3
 	}
 
-	if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+	if opt.RouteByLatency || opt.RouteRandomly {
 		opt.ReadOnly = true
 	}
 
 	if opt.PoolSize == 0 {
-		opt.PoolSize = 5 * runtime.NumCPU()
+		opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
 	}
 
 	switch opt.ReadTimeout {
@@ -146,6 +149,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		MinIdleConns:       opt.MinIdleConns,
 		MaxConnAge:         opt.MaxConnAge,
@@ -153,9 +157,13 @@
 		IdleTimeout:        opt.IdleTimeout,
 		IdleCheckFrequency: disableIdleCheck,
 
-		readOnly: opt.ReadOnly,
-
 		TLSConfig: opt.TLSConfig,
+		// If ClusterSlots is populated, then we probably have an artificial
+		// cluster whose nodes are not in clustering mode (otherwise there isn't
+		// much use for ClusterSlots config).  This means we cannot execute the
+		// READONLY command against that node -- setting readOnly to false in such
+		// situations in the options below will prevent that from happening.
+		readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
 	}
 }
 
@@ -291,8 +299,9 @@
 
 func (c *clusterNodes) Addrs() ([]string, error) {
 	var addrs []string
+
 	c.mu.RLock()
-	closed := c.closed
+	closed := c.closed //nolint:ifshort
 	if !closed {
 		if len(c.activeAddrs) > 0 {
 			addrs = c.activeAddrs
@@ -343,7 +352,7 @@
 	}
 }
 
-func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
 	node, err := c.get(addr)
 	if err != nil {
 		return nil, err
@@ -407,7 +416,7 @@
 	}
 
 	n := rand.Intn(len(addrs))
-	return c.Get(addrs[n])
+	return c.GetOrCreate(addrs[n])
 }
 
 //------------------------------------------------------------------------------
@@ -465,7 +474,7 @@
 				addr = replaceLoopbackHost(addr, originHost)
 			}
 
-			node, err := c.nodes.Get(addr)
+			node, err := c.nodes.GetOrCreate(addr)
 			if err != nil {
 				return nil, err
 			}
@@ -586,8 +595,16 @@
 	if len(nodes) == 0 {
 		return c.nodes.Random()
 	}
-	n := rand.Intn(len(nodes))
-	return nodes[n], nil
+	if len(nodes) == 1 {
+		return nodes[0], nil
+	}
+	randomNodes := rand.Perm(len(nodes))
+	for _, idx := range randomNodes {
+		if node := nodes[idx]; !node.Failing() {
+			return node, nil
+		}
+	}
+	return nodes[randomNodes[0]], nil
 }
 
 func (c *clusterState) slotNodes(slot int) []*clusterNode {
@@ -628,14 +645,14 @@
 	return state, nil
 }
 
-func (c *clusterStateHolder) LazyReload(ctx context.Context) {
+func (c *clusterStateHolder) LazyReload() {
 	if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
 		return
 	}
 	go func() {
 		defer atomic.StoreUint32(&c.reloading, 0)
 
-		_, err := c.Reload(ctx)
+		_, err := c.Reload(context.Background())
 		if err != nil {
 			return
 		}
@@ -645,14 +662,15 @@
 
 func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
 	v := c.state.Load()
-	if v != nil {
-		state := v.(*clusterState)
-		if time.Since(state.createdAt) > 10*time.Second {
-			c.LazyReload(ctx)
-		}
-		return state, nil
+	if v == nil {
+		return c.Reload(ctx)
 	}
-	return c.Reload(ctx)
+
+	state := v.(*clusterState)
+	if time.Since(state.createdAt) > 10*time.Second {
+		c.LazyReload()
+	}
+	return state, nil
 }
 
 func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
@@ -728,7 +746,7 @@
 // ReloadState reloads cluster state. If available it calls ClusterSlots func
 // to get cluster slots information.
 func (c *ClusterClient) ReloadState(ctx context.Context) {
-	c.state.LazyReload(ctx)
+	c.state.LazyReload()
 }
 
 // Close closes the cluster client, releasing any open resources.
@@ -789,7 +807,7 @@
 		}
 		if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
 			if isReadOnly {
-				c.state.LazyReload(ctx)
+				c.state.LazyReload()
 			}
 			node = nil
 			continue
@@ -806,8 +824,10 @@
 		var addr string
 		moved, ask, addr = isMovedError(lastErr)
 		if moved || ask {
+			c.state.LazyReload()
+
 			var err error
-			node, err = c.nodes.Get(addr)
+			node, err = c.nodes.GetOrCreate(addr)
 			if err != nil {
 				return err
 			}
@@ -1004,7 +1024,7 @@
 	for _, idx := range rand.Perm(len(addrs)) {
 		addr := addrs[idx]
 
-		node, err := c.nodes.Get(addr)
+		node, err := c.nodes.GetOrCreate(addr)
 		if err != nil {
 			if firstErr == nil {
 				firstErr = err
@@ -1218,13 +1238,13 @@
 		return false
 	}
 
-	node, err := c.nodes.Get(addr)
+	node, err := c.nodes.GetOrCreate(addr)
 	if err != nil {
 		return false
 	}
 
 	if moved {
-		c.state.LazyReload(ctx)
+		c.state.LazyReload()
 		failedCmds.Add(node, cmd)
 		return true
 	}
@@ -1252,10 +1272,13 @@
 }
 
 func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
-	return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
+	return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
 }
 
 func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	// Trim multi .. exec.
+	cmds = cmds[1 : len(cmds)-1]
+
 	state, err := c.state.Get(ctx)
 	if err != nil {
 		setCmdsErr(cmds, err)
@@ -1291,6 +1314,7 @@
 					if err == nil {
 						return
 					}
+
 					if attempt < c.opt.MaxRedirects {
 						if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
 							setCmdsErr(cmds, err)
@@ -1400,13 +1424,13 @@
 	addr string,
 	failedCmds *cmdsMap,
 ) error {
-	node, err := c.nodes.Get(addr)
+	node, err := c.nodes.GetOrCreate(addr)
 	if err != nil {
 		return err
 	}
 
 	if moved {
-		c.state.LazyReload(ctx)
+		c.state.LazyReload()
 		for _, cmd := range cmds {
 			failedCmds.Add(node, cmd)
 		}
@@ -1455,7 +1479,7 @@
 
 		moved, ask, addr := isMovedError(err)
 		if moved || ask {
-			node, err = c.nodes.Get(addr)
+			node, err = c.nodes.GetOrCreate(addr)
 			if err != nil {
 				return err
 			}
@@ -1464,7 +1488,7 @@
 
 		if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
 			if isReadOnly {
-				c.state.LazyReload(ctx)
+				c.state.LazyReload()
 			}
 			node, err = c.slotMasterNode(ctx, slot)
 			if err != nil {
@@ -1567,7 +1591,7 @@
 	for _, idx := range perm {
 		addr := addrs[idx]
 
-		node, err := c.nodes.Get(addr)
+		node, err := c.nodes.GetOrCreate(addr)
 		if err != nil {
 			if firstErr == nil {
 				firstErr = err
@@ -1631,7 +1655,7 @@
 		return nil, err
 	}
 
-	if (c.opt.RouteByLatency || c.opt.RouteRandomly) && cmdInfo != nil && cmdInfo.ReadOnly {
+	if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
 		return c.slotReadOnlyNode(state, slot)
 	}
 	return state.slotMasterNode(slot)
@@ -1655,6 +1679,35 @@
 	return state.slotMasterNode(slot)
 }
 
+// SlaveForKey gets a client for a replica node to run any command on it.
+// This is especially useful if we want to run a particular lua script which has
+// only read only commands on the replica.
+// This is because other redis commands generally have a flag that points that
+// they are read only and automatically run on the replica nodes
+// if ClusterOptions.ReadOnly flag is set to true.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	slot := hashtag.Slot(key)
+	node, err := c.slotReadOnlyNode(state, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
+
+// MasterForKey return a client to the master node for a particular key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+	slot := hashtag.Slot(key)
+	node, err := c.slotMasterNode(ctx, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
+
 func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
 	for _, n := range nodes {
 		if n == node {
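
The new SlaveForKey and MasterForKey helpers return a *Client pinned to the replica or master that owns a key's hash slot; the comment above calls out read-only Lua scripts as the main use case. A hedged sketch with placeholder addresses:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	cc := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:    []string{"127.0.0.1:7000", "127.0.0.1:7001"},
		ReadOnly: true, // allow read-only commands to run on replicas
	})

	// Pin a client to the replica that owns this key's slot, e.g. to run a
	// read-only Lua script there instead of on the master.
	replica, err := cc.SlaveForKey(ctx, "user:42")
	if err != nil {
		panic(err)
	}
	n, err := replica.Exists(ctx, "user:42").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("exists on replica:", n)
}
```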
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
index 1f0bae0..085bce8 100644
--- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go
+++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
@@ -2,24 +2,108 @@
 
 import (
 	"context"
+	"sync"
 	"sync/atomic"
 )
 
 func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
 	cmd := NewIntCmd(ctx, "dbsize")
-	var size int64
-	err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
-		n, err := master.DBSize(ctx).Result()
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		var size int64
+		err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+			n, err := master.DBSize(ctx).Result()
+			if err != nil {
+				return err
+			}
+			atomic.AddInt64(&size, n)
+			return nil
+		})
 		if err != nil {
-			return err
+			cmd.SetErr(err)
+		} else {
+			cmd.val = size
 		}
-		atomic.AddInt64(&size, n)
 		return nil
 	})
-	if err != nil {
-		cmd.SetErr(err)
-		return cmd
+	return cmd
+}
+
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+	cmd := NewStringCmd(ctx, "script", "load", script)
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		mu := &sync.Mutex{}
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			val, err := shard.ScriptLoad(ctx, script).Result()
+			if err != nil {
+				return err
+			}
+
+			mu.Lock()
+			if cmd.Val() == "" {
+				cmd.val = val
+			}
+			mu.Unlock()
+
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		}
+		return nil
+	})
+	return cmd
+}
+
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "script", "flush")
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			return shard.ScriptFlush(ctx).Err()
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		}
+		return nil
+	})
+	return cmd
+}
+
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+	args := make([]interface{}, 2+len(hashes))
+	args[0] = "script"
+	args[1] = "exists"
+	for i, hash := range hashes {
+		args[2+i] = hash
 	}
-	cmd.val = size
+	cmd := NewBoolSliceCmd(ctx, args...)
+
+	result := make([]bool, len(hashes))
+	for i := range result {
+		result[i] = true
+	}
+
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		mu := &sync.Mutex{}
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			val, err := shard.ScriptExists(ctx, hashes...).Result()
+			if err != nil {
+				return err
+			}
+
+			mu.Lock()
+			for i, v := range val {
+				result[i] = result[i] && v
+			}
+			mu.Unlock()
+
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		} else {
+			cmd.val = result
+		}
+		return nil
+	})
 	return cmd
 }
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
index 5dd5533..4bb12a8 100644
--- a/vendor/github.com/go-redis/redis/v8/command.go
+++ b/vendor/github.com/go-redis/redis/v8/command.go
@@ -8,6 +8,7 @@
 	"time"
 
 	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hscan"
 	"github.com/go-redis/redis/v8/internal/proto"
 	"github.com/go-redis/redis/v8/internal/util"
 )
@@ -19,7 +20,7 @@
 	String() string
 	stringArg(int) string
 	firstKeyPos() int8
-	setFirstKeyPos(int8)
+	SetFirstKeyPos(int8)
 
 	readTimeout() *time.Duration
 	readReply(rd *proto.Reader) error
@@ -150,15 +151,21 @@
 	if pos < 0 || pos >= len(cmd.args) {
 		return ""
 	}
-	s, _ := cmd.args[pos].(string)
-	return s
+	arg := cmd.args[pos]
+	switch v := arg.(type) {
+	case string:
+		return v
+	default:
+		// TODO: consider using appendArg
+		return fmt.Sprint(v)
+	}
 }
 
 func (cmd *baseCmd) firstKeyPos() int8 {
 	return cmd.keyPos
 }
 
-func (cmd *baseCmd) setFirstKeyPos(keyPos int8) {
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
 	cmd.keyPos = keyPos
 }
 
@@ -199,6 +206,10 @@
 	return cmdString(cmd, cmd.val)
 }
 
+func (cmd *Cmd) SetVal(val interface{}) {
+	cmd.val = val
+}
+
 func (cmd *Cmd) Val() interface{} {
 	return cmd.val
 }
@@ -211,7 +222,11 @@
 	if cmd.err != nil {
 		return "", cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+	switch val := val.(type) {
 	case string:
 		return val, nil
 	default:
@@ -239,7 +254,11 @@
 	if cmd.err != nil {
 		return 0, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+	switch val := val.(type) {
 	case int64:
 		return val, nil
 	case string:
@@ -254,7 +273,11 @@
 	if cmd.err != nil {
 		return 0, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+	switch val := val.(type) {
 	case int64:
 		return uint64(val), nil
 	case string:
@@ -269,7 +292,11 @@
 	if cmd.err != nil {
 		return 0, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+	switch val := val.(type) {
 	case int64:
 		return float32(val), nil
 	case string:
@@ -288,7 +315,11 @@
 	if cmd.err != nil {
 		return 0, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+	switch val := val.(type) {
 	case int64:
 		return float64(val), nil
 	case string:
@@ -303,7 +334,11 @@
 	if cmd.err != nil {
 		return false, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+	switch val := val.(type) {
 	case int64:
 		return val != 0, nil
 	case string:
@@ -314,6 +349,120 @@
 	}
 }
 
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+	if cmd.err != nil {
+		return nil, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case []interface{}:
+		return val, nil
+	default:
+		return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+	}
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := make([]string, len(slice))
+	for i, iface := range slice {
+		val, err := toString(iface)
+		if err != nil {
+			return nil, err
+		}
+		ss[i] = val
+	}
+	return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	nums := make([]int64, len(slice))
+	for i, iface := range slice {
+		val, err := toInt64(iface)
+		if err != nil {
+			return nil, err
+		}
+		nums[i] = val
+	}
+	return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	nums := make([]uint64, len(slice))
+	for i, iface := range slice {
+		val, err := toUint64(iface)
+		if err != nil {
+			return nil, err
+		}
+		nums[i] = val
+	}
+	return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	floats := make([]float32, len(slice))
+	for i, iface := range slice {
+		val, err := toFloat32(iface)
+		if err != nil {
+			return nil, err
+		}
+		floats[i] = val
+	}
+	return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	floats := make([]float64, len(slice))
+	for i, iface := range slice {
+		val, err := toFloat64(iface)
+		if err != nil {
+			return nil, err
+		}
+		floats[i] = val
+	}
+	return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	bools := make([]bool, len(slice))
+	for i, iface := range slice {
+		val, err := toBool(iface)
+		if err != nil {
+			return nil, err
+		}
+		bools[i] = val
+	}
+	return bools, nil
+}
+
 func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
 	cmd.val, err = rd.ReadReply(sliceParser)
 	return err
@@ -359,6 +508,10 @@
 	}
 }
 
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+	cmd.val = val
+}
+
 func (cmd *SliceCmd) Val() []interface{} {
 	return cmd.val
 }
@@ -371,6 +524,26 @@
 	return cmdString(cmd, cmd.val)
 }
 
+// Scan scans the command results into a destination struct. The keys are matched
+// to the destination struct fields by their `redis:"field"` tags.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+
+	// Pass the list of keys and values.
+	// Skip the first two args for: HMGET key
+	var args []interface{}
+	if cmd.args[0] == "hmget" {
+		args = cmd.args[2:]
+	} else {
+		// Otherwise, it's: MGET field field ...
+		args = cmd.args[1:]
+	}
+
+	return hscan.Scan(dst, args, cmd.val)
+}
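+
+// An illustrative sketch (not part of the library), assuming a client rdb, a
+// context ctx, and a destination struct with `redis:"..."` tags:
+//
+//	type User struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//	var u User
+//	err := rdb.HMGet(ctx, "user:1", "name", "age").Scan(&u)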
+
 func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
 	v, err := rd.ReadArrayReply(sliceParser)
 	if err != nil {
@@ -399,6 +572,10 @@
 	}
 }
 
+func (cmd *StatusCmd) SetVal(val string) {
+	cmd.val = val
+}
+
 func (cmd *StatusCmd) Val() string {
 	return cmd.val
 }
@@ -435,6 +612,10 @@
 	}
 }
 
+func (cmd *IntCmd) SetVal(val int64) {
+	cmd.val = val
+}
+
 func (cmd *IntCmd) Val() int64 {
 	return cmd.val
 }
@@ -475,6 +656,10 @@
 	}
 }
 
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+	cmd.val = val
+}
+
 func (cmd *IntSliceCmd) Val() []int64 {
 	return cmd.val
 }
@@ -523,6 +708,10 @@
 	}
 }
 
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+	cmd.val = val
+}
+
 func (cmd *DurationCmd) Val() time.Duration {
 	return cmd.val
 }
@@ -570,6 +759,10 @@
 	}
 }
 
+func (cmd *TimeCmd) SetVal(val time.Time) {
+	cmd.val = val
+}
+
 func (cmd *TimeCmd) Val() time.Time {
 	return cmd.val
 }
@@ -623,6 +816,10 @@
 	}
 }
 
+func (cmd *BoolCmd) SetVal(val bool) {
+	cmd.val = val
+}
+
 func (cmd *BoolCmd) Val() bool {
 	return cmd.val
 }
@@ -677,6 +874,10 @@
 	}
 }
 
+func (cmd *StringCmd) SetVal(val string) {
+	cmd.val = val
+}
+
 func (cmd *StringCmd) Val() string {
 	return cmd.val
 }
@@ -689,6 +890,13 @@
 	return util.StringToBytes(cmd.val), cmd.err
 }
 
+func (cmd *StringCmd) Bool() (bool, error) {
+	if cmd.err != nil {
+		return false, cmd.err
+	}
+	return strconv.ParseBool(cmd.val)
+}
+
 func (cmd *StringCmd) Int() (int, error) {
 	if cmd.err != nil {
 		return 0, cmd.err
@@ -770,6 +978,10 @@
 	}
 }
 
+func (cmd *FloatCmd) SetVal(val float64) {
+	cmd.val = val
+}
+
 func (cmd *FloatCmd) Val() float64 {
 	return cmd.val
 }
@@ -789,6 +1001,59 @@
 
 //------------------------------------------------------------------------------
 
+type FloatSliceCmd struct {
+	baseCmd
+
+	val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+	return &FloatSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+	cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+	return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]float64, n)
+		for i := 0; i < len(cmd.val); i++ {
+			switch num, err := rd.ReadFloatReply(); {
+			case err == Nil:
+				cmd.val[i] = 0
+			case err != nil:
+				return nil, err
+			default:
+				cmd.val[i] = num
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
 type StringSliceCmd struct {
 	baseCmd
 
@@ -806,6 +1071,10 @@
 	}
 }
 
+func (cmd *StringSliceCmd) SetVal(val []string) {
+	cmd.val = val
+}
+
 func (cmd *StringSliceCmd) Val() []string {
 	return cmd.val
 }
@@ -859,6 +1128,10 @@
 	}
 }
 
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+	cmd.val = val
+}
+
 func (cmd *BoolSliceCmd) Val() []bool {
 	return cmd.val
 }
@@ -905,6 +1178,10 @@
 	}
 }
 
+func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
+	cmd.val = val
+}
+
 func (cmd *StringStringMapCmd) Val() map[string]string {
 	return cmd.val
 }
@@ -917,6 +1194,27 @@
 	return cmdString(cmd, cmd.val)
 }
 
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to the destination struct fields by their `redis:"field"` tags.
+func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+
+	strct, err := hscan.Struct(dest)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range cmd.val {
+		if err := strct.Scan(k, v); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
 	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
 		cmd.val = make(map[string]string, n/2)
@@ -957,6 +1255,10 @@
 	}
 }
 
+func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
+	cmd.val = val
+}
+
 func (cmd *StringIntMapCmd) Val() map[string]int64 {
 	return cmd.val
 }
@@ -1009,6 +1311,10 @@
 	}
 }
 
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+	cmd.val = val
+}
+
 func (cmd *StringStructMapCmd) Val() map[string]struct{} {
 	return cmd.val
 }
@@ -1060,6 +1366,10 @@
 	}
 }
 
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+	cmd.val = val
+}
+
 func (cmd *XMessageSliceCmd) Val() []XMessage {
 	return cmd.val
 }
@@ -1169,6 +1479,10 @@
 	}
 }
 
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+	cmd.val = val
+}
+
 func (cmd *XStreamSliceCmd) Val() []XStream {
 	return cmd.val
 }
@@ -1241,6 +1555,10 @@
 	}
 }
 
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+	cmd.val = val
+}
+
 func (cmd *XPendingCmd) Val() *XPending {
 	return cmd.val
 }
@@ -1343,6 +1661,10 @@
 	}
 }
 
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+	cmd.val = val
+}
+
 func (cmd *XPendingExtCmd) Val() []XPendingExt {
 	return cmd.val
 }
@@ -1403,6 +1725,233 @@
 
 //------------------------------------------------------------------------------
 
+type XAutoClaimCmd struct {
+	baseCmd
+
+	start string
+	val   []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+	return &XAutoClaimCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+	cmd.val = val
+	cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+	return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+	return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d, wanted 2", n)
+		}
+		var err error
+
+		cmd.start, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val, err = readXMessageSlice(rd)
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+	baseCmd
+
+	start string
+	val   []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+	return &XAutoClaimJustIDCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+	cmd.val = val
+	cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+	return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+	return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d, wanted 2", n)
+		}
+		var err error
+
+		cmd.start, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val = make([]string, nn)
+		for i := 0; i < nn; i++ {
+			cmd.val[i], err = rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+	baseCmd
+	val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+	Name    string
+	Pending int64
+	Idle    int64
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+	return &XInfoConsumersCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "consumers", stream, group},
+		},
+	}
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+	cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+	return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]XInfoConsumer, n)
+
+	for i := 0; i < n; i++ {
+		cmd.val[i], err = readXConsumerInfo(rd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
+	var consumer XInfoConsumer
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return consumer, err
+	}
+	if n != 6 {
+		return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
+	}
+
+	for i := 0; i < 3; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return consumer, err
+		}
+
+		val, err := rd.ReadString()
+		if err != nil {
+			return consumer, err
+		}
+
+		switch key {
+		case "name":
+			consumer.Name = val
+		case "pending":
+			consumer.Pending, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return consumer, err
+			}
+		case "idle":
+			consumer.Idle, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return consumer, err
+			}
+		default:
+			return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+		}
+	}
+
+	return consumer, nil
+}
+
+//------------------------------------------------------------------------------
+
 type XInfoGroupsCmd struct {
 	baseCmd
 	val []XInfoGroup
@@ -1426,6 +1975,10 @@
 	}
 }
 
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+	cmd.val = val
+}
+
 func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
 	return cmd.val
 }
@@ -1529,6 +2082,10 @@
 	}
 }
 
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+	cmd.val = val
+}
+
 func (cmd *XInfoStreamCmd) Val() *XInfoStream {
 	return cmd.val
 }
@@ -1574,8 +2131,14 @@
 			info.LastGeneratedID, err = rd.ReadString()
 		case "first-entry":
 			info.FirstEntry, err = readXMessage(rd)
+			if err == Nil {
+				err = nil
+			}
 		case "last-entry":
 			info.LastEntry, err = readXMessage(rd)
+			if err == Nil {
+				err = nil
+			}
 		default:
 			return nil, fmt.Errorf("redis: unexpected content %s "+
 				"in XINFO STREAM reply", key)
@@ -1589,6 +2152,306 @@
 
 //------------------------------------------------------------------------------
 
+type XInfoStreamFullCmd struct {
+	baseCmd
+	val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+	Length          int64
+	RadixTreeKeys   int64
+	RadixTreeNodes  int64
+	LastGeneratedID string
+	Entries         []XMessage
+	Groups          []XInfoStreamGroup
+}
+
+type XInfoStreamGroup struct {
+	Name            string
+	LastDeliveredID string
+	PelCount        int64
+	Pending         []XInfoStreamGroupPending
+	Consumers       []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+	ID            string
+	Consumer      string
+	DeliveryTime  time.Time
+	DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+	Name     string
+	SeenTime time.Time
+	PelCount int64
+	Pending  []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+	ID            string
+	DeliveryTime  time.Time
+	DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+	return &XInfoStreamFullCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+	cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+	return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	if n != 12 {
+		return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+			"wanted 12", n)
+	}
+
+	cmd.val = &XInfoStreamFull{}
+
+	for i := 0; i < 6; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return err
+		}
+
+		switch key {
+		case "length":
+			cmd.val.Length, err = rd.ReadIntReply()
+		case "radix-tree-keys":
+			cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
+		case "radix-tree-nodes":
+			cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
+		case "last-generated-id":
+			cmd.val.LastGeneratedID, err = rd.ReadString()
+		case "entries":
+			cmd.val.Entries, err = readXMessageSlice(rd)
+		case "groups":
+			cmd.val.Groups, err = readStreamGroups(rd)
+		default:
+			return fmt.Errorf("redis: unexpected content %s "+
+				"in XINFO STREAM reply", key)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+	groups := make([]XInfoStreamGroup, 0, n)
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 10 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 10", nn)
+		}
+
+		group := XInfoStreamGroup{}
+
+		for f := 0; f < 5; f++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch key {
+			case "name":
+				group.Name, err = rd.ReadString()
+			case "last-delivered-id":
+				group.LastDeliveredID, err = rd.ReadString()
+			case "pel-count":
+				group.PelCount, err = rd.ReadIntReply()
+			case "pending":
+				group.Pending, err = readXInfoStreamGroupPending(rd)
+			case "consumers":
+				group.Consumers, err = readXInfoStreamConsumers(rd)
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM reply", key)
+			}
+
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		groups = append(groups, group)
+	}
+
+	return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	pending := make([]XInfoStreamGroupPending, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 4 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 4", nn)
+		}
+
+		p := XInfoStreamGroupPending{}
+
+		p.ID, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		p.Consumer, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		delivery, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+		p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+		p.DeliveryCount, err = rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		pending = append(pending, p)
+	}
+
+	return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	consumers := make([]XInfoStreamConsumer, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 8 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 8", nn)
+		}
+
+		c := XInfoStreamConsumer{}
+
+		for f := 0; f < 4; f++ {
+			cKey, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch cKey {
+			case "name":
+				c.Name, err = rd.ReadString()
+			case "seen-time":
+				seen, err := rd.ReadIntReply()
+				if err != nil {
+					return nil, err
+				}
+				c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
+			case "pel-count":
+				c.PelCount, err = rd.ReadIntReply()
+			case "pending":
+				pendingNumber, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+
+				c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+				for pn := 0; pn < pendingNumber; pn++ {
+					nn, err := rd.ReadArrayLen()
+					if err != nil {
+						return nil, err
+					}
+					if nn != 3 {
+						return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply, "+
+							"wanted 3", nn)
+					}
+
+					p := XInfoStreamConsumerPending{}
+
+					p.ID, err = rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+
+					delivery, err := rd.ReadIntReply()
+					if err != nil {
+						return nil, err
+					}
+					p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+					p.DeliveryCount, err = rd.ReadIntReply()
+					if err != nil {
+						return nil, err
+					}
+
+					c.Pending = append(c.Pending, p)
+				}
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM reply", cKey)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+		consumers = append(consumers, c)
+	}
+
+	return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
 type ZSliceCmd struct {
 	baseCmd
 
@@ -1606,6 +2469,10 @@
 	}
 }
 
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+	cmd.val = val
+}
+
 func (cmd *ZSliceCmd) Val() []Z {
 	return cmd.val
 }
@@ -1661,6 +2528,10 @@
 	}
 }
 
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+	cmd.val = val
+}
+
 func (cmd *ZWithKeyCmd) Val() *ZWithKey {
 	return cmd.val
 }
@@ -1725,6 +2596,11 @@
 	}
 }
 
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+	cmd.page = page
+	cmd.cursor = cursor
+}
+
 func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
 	return cmd.page, cmd.cursor
 }
@@ -1779,6 +2655,10 @@
 	}
 }
 
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+	cmd.val = val
+}
+
 func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
 	return cmd.val
 }
@@ -1933,6 +2813,10 @@
 	return args
 }
 
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+	cmd.locations = locations
+}
+
 func (cmd *GeoLocationCmd) Val() []GeoLocation {
 	return cmd.locations
 }
@@ -2024,6 +2908,192 @@
 
 //------------------------------------------------------------------------------
 
+// GeoSearchQuery is used for GEOSEARCH/GEOSEARCHSTORE command queries.
+type GeoSearchQuery struct {
+	Member string
+
+	// Latitude and Longitude when using FromLonLat option.
+	Longitude float64
+	Latitude  float64
+
+	// Distance and unit when using ByRadius option.
+	// Can use m, km, ft, or mi. Default is km.
+	Radius     float64
+	RadiusUnit string
+
+	// Height, width and unit when using ByBox option.
+	// Can be m, km, ft, or mi. Default is km.
+	BoxWidth  float64
+	BoxHeight float64
+	BoxUnit   string
+
+	// Can be ASC or DESC. Default is no sort order.
+	Sort     string
+	Count    int
+	CountAny bool
+}
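+
+// An illustrative sketch (not part of the library), assuming a client rdb, a
+// context ctx, and a geo key "points": search within 100 km of a coordinate and
+// return up to 10 members ordered by distance:
+//
+//	members, err := rdb.GeoSearch(ctx, "points", &redis.GeoSearchQuery{
+//		Longitude:  15.0,
+//		Latitude:   37.0,
+//		Radius:     100,
+//		RadiusUnit: "km",
+//		Sort:       "ASC",
+//		Count:      10,
+//	}).Result()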
+
+type GeoSearchLocationQuery struct {
+	GeoSearchQuery
+
+	WithCoord bool
+	WithDist  bool
+	WithHash  bool
+}
+
+type GeoSearchStoreQuery struct {
+	GeoSearchQuery
+
+	// When using the StoreDist option, the command stores the items in a
+	// sorted set populated with their distance from the center of the circle or box,
+	// as a floating-point number, in the same unit specified for that shape.
+	StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+	if q.WithCoord {
+		args = append(args, "withcoord")
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+	}
+	if q.WithHash {
+		args = append(args, "withhash")
+	}
+
+	return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+	if q.Member != "" {
+		args = append(args, "frommember", q.Member)
+	} else {
+		args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+	}
+
+	if q.Radius > 0 {
+		if q.RadiusUnit == "" {
+			q.RadiusUnit = "km"
+		}
+		args = append(args, "byradius", q.Radius, q.RadiusUnit)
+	} else {
+		if q.BoxUnit == "" {
+			q.BoxUnit = "km"
+		}
+		args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+	}
+
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+		if q.CountAny {
+			args = append(args, "any")
+		}
+	}
+
+	return args
+}
+
+type GeoSearchLocationCmd struct {
+	baseCmd
+
+	opt *GeoSearchLocationQuery
+	val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+	ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+	return &GeoSearchLocationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		opt: opt,
+	}
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+	cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+	return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]GeoLocation, n)
+	for i := 0; i < n; i++ {
+		_, err = rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+
+		var loc GeoLocation
+
+		loc.Name, err = rd.ReadString()
+		if err != nil {
+			return err
+		}
+		if cmd.opt.WithDist {
+			loc.Dist, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithHash {
+			loc.GeoHash, err = rd.ReadIntReply()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithCoord {
+			nn, err := rd.ReadArrayLen()
+			if err != nil {
+				return err
+			}
+			if nn != 2 {
+				return fmt.Errorf("got %d coordinates, expected 2", nn)
+			}
+
+			loc.Longitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+			loc.Latitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+		}
+
+		cmd.val[i] = loc
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
 type GeoPos struct {
 	Longitude, Latitude float64
 }
@@ -2045,6 +3115,10 @@
 	}
 }
 
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+	cmd.val = val
+}
+
 func (cmd *GeoPosCmd) Val() []*GeoPos {
 	return cmd.val
 }
@@ -2122,6 +3196,10 @@
 	}
 }
 
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+	cmd.val = val
+}
+
 func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
 	return cmd.val
 }
@@ -2309,6 +3387,10 @@
 	}
 }
 
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+	cmd.val = val
+}
+
 func (cmd *SlowLogCmd) Val() []SlowLog {
 	return cmd.val
 }
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
index 79698ba..bbfe089 100644
--- a/vendor/github.com/go-redis/redis/v8/commands.go
+++ b/vendor/github.com/go-redis/redis/v8/commands.go
@@ -9,7 +9,8 @@
 	"github.com/go-redis/redis/v8/internal"
 )
 
-// KeepTTL is an option for Set command to keep key's existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the key's existing TTL. It requires
+// redis-server version >= 6.0; otherwise you will receive an error: (error) ERR syntax error.
 // For example:
 //
 //    rdb.Set(ctx, key, value, redis.KeepTTL)
@@ -67,6 +68,11 @@
 			dst = append(dst, k, v)
 		}
 		return dst
+	case map[string]string:
+		for k, v := range arg {
+			dst = append(dst, k, v)
+		}
+		return dst
 	default:
 		return append(dst, arg)
 	}
@@ -90,6 +96,10 @@
 	Exists(ctx context.Context, keys ...string) *IntCmd
 	Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
 	ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+	ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
 	Keys(ctx context.Context, pattern string) *StringSliceCmd
 	Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
 	Move(ctx context.Context, key string, db int) *BoolCmd
@@ -117,6 +127,8 @@
 	Get(ctx context.Context, key string) *StringCmd
 	GetRange(ctx context.Context, key string, start, end int64) *StringCmd
 	GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+	GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+	GetDel(ctx context.Context, key string) *StringCmd
 	Incr(ctx context.Context, key string) *IntCmd
 	IncrBy(ctx context.Context, key string, value int64) *IntCmd
 	IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
@@ -124,11 +136,14 @@
 	MSet(ctx context.Context, values ...interface{}) *StatusCmd
 	MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
 	Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+	// TODO: rename to SetEx
 	SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
 	SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
 	SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
 	SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
 	StrLen(ctx context.Context, key string) *IntCmd
+	Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
 
 	GetBit(ctx context.Context, key string, offset int64) *IntCmd
 	SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
@@ -141,6 +156,7 @@
 	BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
 
 	Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+	ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
 	SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
 	HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
 	ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
@@ -158,6 +174,7 @@
 	HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
 	HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
 	HVals(ctx context.Context, key string) *StringSliceCmd
+	HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
 
 	BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
 	BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
@@ -168,6 +185,7 @@
 	LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
 	LLen(ctx context.Context, key string) *IntCmd
 	LPop(ctx context.Context, key string) *StringCmd
+	LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
 	LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
 	LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
 	LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
@@ -177,9 +195,12 @@
 	LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
 	LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
 	RPop(ctx context.Context, key string) *StringCmd
+	RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
 	RPopLPush(ctx context.Context, source, destination string) *StringCmd
 	RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
 	RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+	LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+	BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
 
 	SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
 	SCard(ctx context.Context, key string) *IntCmd
@@ -188,6 +209,7 @@
 	SInter(ctx context.Context, keys ...string) *StringSliceCmd
 	SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
 	SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+	SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
 	SMembers(ctx context.Context, key string) *StringSliceCmd
 	SMembersMap(ctx context.Context, key string) *StringStructMapCmd
 	SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
@@ -212,6 +234,7 @@
 	XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
 	XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
 	XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+	XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
 	XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
 	XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
 	XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
@@ -219,19 +242,42 @@
 	XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
 	XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
 	XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+	XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+	XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+
+	// TODO: remove XTrim and XTrimApprox in v9.
 	XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
 	XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+	XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+	XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
 	XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
 	XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+	XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+	XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
 
 	BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
 	BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+
+	// TODO: remove
+	//		ZAddCh
+	//		ZIncr
+	//		ZAddNXCh
+	//		ZAddXXCh
+	//		ZIncrNX
+	//		ZIncrXX
+	// 	in v9.
+	// 	use ZAddArgs and ZAddArgsIncr.
+
 	ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+	ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
 	ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
 	ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
 	ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
@@ -239,7 +285,10 @@
 	ZCount(ctx context.Context, key, min, max string) *IntCmd
 	ZLexCount(ctx context.Context, key, min, max string) *IntCmd
 	ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+	ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+	ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
 	ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+	ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
 	ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
 	ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
 	ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
@@ -247,6 +296,9 @@
 	ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
 	ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
 	ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+	ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+	ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+	ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
 	ZRank(ctx context.Context, key, member string) *IntCmd
 	ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
 	ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
@@ -260,6 +312,12 @@
 	ZRevRank(ctx context.Context, key, member string) *IntCmd
 	ZScore(ctx context.Context, key, member string) *FloatCmd
 	ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+	ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+	ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+	ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
+	ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+	ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+	ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
 
 	PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
 	PFCount(ctx context.Context, keys ...string) *IntCmd
@@ -332,6 +390,9 @@
 	GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
 	GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
 	GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+	GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+	GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+	GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
 	GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
 	GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
 }
@@ -364,7 +425,7 @@
 	return cmd
 }
 
-// Perform an AUTH command, using the given user and pass.
+// AuthACL performs an AUTH command, using the given user and pass.
 // Should be used to authenticate the current connection with one of the connections defined in the ACL list
 // when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
 func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
@@ -375,6 +436,7 @@
 
 func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
 	cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+	cmd.setReadTimeout(timeout)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -425,7 +487,7 @@
 	return cmd
 }
 
-func (c cmdable) Quit(ctx context.Context) *StatusCmd {
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
 	panic("not implemented")
 }
 
@@ -469,7 +531,37 @@
 }
 
 func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
-	cmd := NewBoolCmd(ctx, "expire", key, formatSec(ctx, expiration))
+	return c.expire(ctx, key, expiration, "")
+}
+
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+	ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+	args := make([]interface{}, 3, 4)
+	args[0] = "expire"
+	args[1] = key
+	args[2] = formatSec(ctx, expiration)
+	if mode != "" {
+		args = append(args, mode)
+	}
+
+	cmd := NewBoolCmd(ctx, args...)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -688,7 +780,7 @@
 	return cmd
 }
 
-// Redis `GET key` command. It returns redis.Nil error when key does not exist.
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
 func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
 	cmd := NewStringCmd(ctx, "get", key)
 	_ = c(ctx, cmd)
@@ -707,6 +799,33 @@
 	return cmd
 }
 
+// GetEx gets the value of key and optionally sets its expiration. An expiration
+// of zero removes the TTL associated with the key (i.e. GETEX key PERSIST).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+	args := make([]interface{}, 0, 4)
+	args = append(args, "getex", key)
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == 0 {
+		args = append(args, "persist")
+	}
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
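+
+// An illustrative sketch (not part of the library), assuming a client rdb and a
+// context ctx:
+//
+//	val, err := rdb.GetEx(ctx, "key", time.Minute).Result() // GETEX key EX 60
+//	val, err = rdb.GetEx(ctx, "key", 0).Result()            // GETEX key PERSIST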
+
+// GetDel gets the value of key and deletes the key. Requires redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "getdel", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
 	cmd := NewIntCmd(ctx, "incr", key)
 	_ = c(ctx, cmd)
@@ -762,11 +881,12 @@
 	return cmd
 }
 
-// Redis `SET key value [expiration]` command.
+// Set Redis `SET key value [expiration]` command.
 // Use expiration for `SETEX`-like behavior.
 //
 // Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires
+// redis-server version >= 6.0; otherwise you will receive an error: (error) ERR syntax error.
 func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
 	args := make([]interface{}, 3, 5)
 	args[0] = "set"
@@ -787,17 +907,69 @@
 	return cmd
 }
 
-// Redis `SETEX key expiration value` command.
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+	// Mode can be `NX` or `XX` or empty.
+	Mode string
+
+	// Zero `TTL` or `Expiration` means that the key has no expiration time.
+	TTL      time.Duration
+	ExpireAt time.Time
+
+	// When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+	Get bool
+
+	// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires
+	// redis-server version >= 6.0; otherwise you will receive an error: (error) ERR syntax error.
+	KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+	args := []interface{}{"set", key, value}
+
+	if a.KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	if !a.ExpireAt.IsZero() {
+		args = append(args, "exat", a.ExpireAt.Unix())
+	}
+	if a.TTL > 0 {
+		if usePrecise(a.TTL) {
+			args = append(args, "px", formatMs(ctx, a.TTL))
+		} else {
+			args = append(args, "ex", formatSec(ctx, a.TTL))
+		}
+	}
+
+	if a.Mode != "" {
+		args = append(args, a.Mode)
+	}
+
+	if a.Get {
+		args = append(args, "get")
+	}
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
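+
+// An illustrative sketch (not part of the library), assuming a client rdb and a
+// context ctx: SET with NX semantics and a one-minute TTL:
+//
+//	err := rdb.SetArgs(ctx, "key", "value", redis.SetArgs{
+//		Mode: "NX",
+//		TTL:  time.Minute,
+//	}).Err()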
+
+// SetEX Redis `SETEX key expiration value` command.
 func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
 	cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SET key value [expiration] NX` command.
+// SetNX Redis `SET key value [expiration] NX` command.
 //
 // Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires
+// redis-server version >= 6.0; otherwise you will receive an error: (error) ERR syntax error.
 func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
 	var cmd *BoolCmd
 	switch expiration {
@@ -818,10 +990,11 @@
 	return cmd
 }
 
-// Redis `SET key value [expiration] XX` command.
+// SetXX Redis `SET key value [expiration] XX` command.
 //
 // Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires
+// redis-server version >= 6.0; otherwise you will receive an error: (error) ERR syntax error.
 func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
 	var cmd *BoolCmd
 	switch expiration {
@@ -853,6 +1026,16 @@
 	return cmd
 }
 
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+	args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+	if replace {
+		args = append(args, "REPLACE")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 //------------------------------------------------------------------------------
 
 func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
@@ -965,6 +1148,22 @@
 	return cmd
 }
 
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	if keyType != "" {
+		args = append(args, "type", keyType)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
 	args := []interface{}{"sscan", key, cursor}
 	if match != "" {
@@ -1113,6 +1312,21 @@
 	return cmd
 }
 
+// HRandField returns random fields from the hash stored at key. Requires redis-server version >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "hrandfield", key, count)
+	if withValues {
+		args = append(args, "withvalues")
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 //------------------------------------------------------------------------------
 
 func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
@@ -1190,6 +1404,12 @@
 	return cmd
 }
 
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 type LPosArgs struct {
 	Rank, MaxLen int64
 }
@@ -1283,6 +1503,12 @@
 	return cmd
 }
 
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
 	cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
 	_ = c(ctx, cmd)
@@ -1309,6 +1535,21 @@
 	return cmd
 }
 
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+	cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BLMove(
+	ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+	cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 //------------------------------------------------------------------------------
 
 func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
@@ -1379,14 +1620,25 @@
 	return cmd
 }
 
-// Redis `SMEMBERS key` command output as a slice.
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "smismember"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewBoolSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
 func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
 	cmd := NewStringSliceCmd(ctx, "smembers", key)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SMEMBERS key` command output as a map.
+// SMembersMap Redis `SMEMBERS key` command output as a map.
 func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
 	cmd := NewStringStructMapCmd(ctx, "smembers", key)
 	_ = c(ctx, cmd)
@@ -1399,28 +1651,28 @@
 	return cmd
 }
 
-// Redis `SPOP key` command.
+// SPop Redis `SPOP key` command.
 func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
 	cmd := NewStringCmd(ctx, "spop", key)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SPOP key count` command.
+// SPopN Redis `SPOP key count` command.
 func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
 	cmd := NewStringSliceCmd(ctx, "spop", key, count)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SRANDMEMBER key` command.
+// SRandMember Redis `SRANDMEMBER key` command.
 func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
 	cmd := NewStringCmd(ctx, "srandmember", key)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SRANDMEMBER key count` command.
+// SRandMemberN Redis `SRANDMEMBER key count` command.
 func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
 	cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
 	_ = c(ctx, cmd)
@@ -1468,22 +1720,50 @@
 //   - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
 //
 // Note that map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
 type XAddArgs struct {
-	Stream       string
-	MaxLen       int64 // MAXLEN N
+	Stream     string
+	NoMkStream bool
+	MaxLen     int64 // MAXLEN N
+
+	// Deprecated: use MaxLen together with Approx instead; remove in v9.
 	MaxLenApprox int64 // MAXLEN ~ N
-	ID           string
-	Values       interface{}
+
+	MinID string
+	// Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+	Approx bool
+	Limit  int64
+	ID     string
+	Values interface{}
 }
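+
+// An illustrative sketch (not part of the library), assuming a client rdb and a
+// context ctx: append an entry and trim the stream to approximately 1000 entries:
+//
+//	rdb.XAdd(ctx, &redis.XAddArgs{
+//		Stream: "events",
+//		MaxLen: 1000,
+//		Approx: true,
+//		ID:     "*",
+//		Values: map[string]interface{}{"type": "click"},
+//	})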
 
+// XAdd adds an entry to a stream. Note that a.Limit is affected by a known
+// redis-server bug; please confirm its behaviour before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
 func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
-	args := make([]interface{}, 0, 8)
-	args = append(args, "xadd")
-	args = append(args, a.Stream)
-	if a.MaxLen > 0 {
-		args = append(args, "maxlen", a.MaxLen)
-	} else if a.MaxLenApprox > 0 {
+	args := make([]interface{}, 0, 11)
+	args = append(args, "xadd", a.Stream)
+	if a.NoMkStream {
+		args = append(args, "nomkstream")
+	}
+	switch {
+	case a.MaxLen > 0:
+		if a.Approx {
+			args = append(args, "maxlen", "~", a.MaxLen)
+		} else {
+			args = append(args, "maxlen", a.MaxLen)
+		}
+	case a.MaxLenApprox > 0:
+		// TODO remove in v9.
 		args = append(args, "maxlen", "~", a.MaxLenApprox)
+	case a.MinID != "":
+		if a.Approx {
+			args = append(args, "minid", "~", a.MinID)
+		} else {
+			args = append(args, "minid", a.MinID)
+		}
+	}
+	if a.Limit > 0 {
+		args = append(args, "limit", a.Limit)
 	}
 	if a.ID != "" {
 		args = append(args, a.ID)
@@ -1544,7 +1824,7 @@
 }
 
 func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
-	args := make([]interface{}, 0, 5+len(a.Streams))
+	args := make([]interface{}, 0, 6+len(a.Streams))
 	args = append(args, "xread")
 
 	keyPos := int8(1)
@@ -1568,7 +1848,7 @@
 	if a.Block >= 0 {
 		cmd.setReadTimeout(a.Block)
 	}
-	cmd.setFirstKeyPos(keyPos)
+	cmd.SetFirstKeyPos(keyPos)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -1604,6 +1884,12 @@
 	return cmd
 }
 
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
 	cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
 	_ = c(ctx, cmd)
@@ -1620,10 +1906,10 @@
 }
 
 func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
-	args := make([]interface{}, 0, 8+len(a.Streams))
+	args := make([]interface{}, 0, 10+len(a.Streams))
 	args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
 
-	keyPos := int8(1)
+	keyPos := int8(4)
 	if a.Count > 0 {
 		args = append(args, "count", a.Count)
 		keyPos += 2
@@ -1646,7 +1932,7 @@
 	if a.Block >= 0 {
 		cmd.setReadTimeout(a.Block)
 	}
-	cmd.setFirstKeyPos(keyPos)
+	cmd.SetFirstKeyPos(keyPos)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -1670,6 +1956,7 @@
 type XPendingExtArgs struct {
 	Stream   string
 	Group    string
+	Idle     time.Duration
 	Start    string
 	End      string
 	Count    int64
@@ -1677,8 +1964,12 @@
 }
 
 func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
-	args := make([]interface{}, 0, 7)
-	args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+	args := make([]interface{}, 0, 9)
+	args = append(args, "xpending", a.Stream, a.Group)
+	if a.Idle != 0 {
+		args = append(args, "idle", formatMs(ctx, a.Idle))
+	}
+	args = append(args, a.Start, a.End, a.Count)
 	if a.Consumer != "" {
 		args = append(args, a.Consumer)
 	}
@@ -1687,6 +1978,39 @@
 	return cmd
 }
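+
+// Example (sketch, assuming a *Client named rdb and a context ctx): listing up
+// to 10 pending entries that have been idle for at least 30 seconds (the IDLE
+// filter requires redis-server >= 6.2.0) could look like:
+//
+//	rdb.XPendingExt(ctx, &XPendingExtArgs{
+//		Stream: "mystream",
+//		Group:  "mygroup",
+//		Idle:   30 * time.Second,
+//		Start:  "-",
+//		End:    "+",
+//		Count:  10,
+//	})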
 
+type XAutoClaimArgs struct {
+	Stream   string
+	Group    string
+	MinIdle  time.Duration
+	Start    string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+	args := xAutoClaimArgs(ctx, a)
+	cmd := NewXAutoClaimCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+	args := xAutoClaimArgs(ctx, a)
+	args = append(args, "justid")
+	cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 8)
+	args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+	}
+	return args
+}
+
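+// Example (sketch, assuming a *Client named rdb and a context ctx): XAUTOCLAIM
+// (redis-server >= 6.2.0) claiming entries idle for more than a minute, in
+// batches of 10, could look like:
+//
+//	rdb.XAutoClaim(ctx, &XAutoClaimArgs{
+//		Stream:   "mystream",
+//		Group:    "mygroup",
+//		Consumer: "consumer-1",
+//		MinIdle:  time.Minute,
+//		Start:    "0-0",
+//		Count:    10,
+//	})
+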
 type XClaimArgs struct {
 	Stream   string
 	Group    string
@@ -1711,7 +2035,7 @@
 }
 
 func xClaimArgs(a *XClaimArgs) []interface{} {
-	args := make([]interface{}, 0, 4+len(a.Messages))
+	args := make([]interface{}, 0, 5+len(a.Messages))
 	args = append(args,
 		"xclaim",
 		a.Stream,
@@ -1723,14 +2047,67 @@
 	return args
 }
 
-func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
-	cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", maxLen)
+// xTrim If approx is true, the "~" matcher is added; otherwise exact trimming ("=", the redis default) is used.
+// example:
+//		XTRIM key MAXLEN/MINID threshold LIMIT limit.
+//		XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+// If the redis-server version is lower than 6.2, set limit to 0.
+func (c cmdable) xTrim(
+	ctx context.Context, key, strategy string,
+	approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+	args := make([]interface{}, 0, 7)
+	args = append(args, "xtrim", key, strategy)
+	if approx {
+		args = append(args, "~")
+	}
+	args = append(args, threshold)
+	if limit > 0 {
+		args = append(args, "limit", limit)
+	}
+	cmd := NewIntCmd(ctx, args...)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
+// Deprecated: use XTrimMaxLen, remove in v9.
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// Deprecated: use XTrimMaxLenApprox, remove in v9.
 func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
-	cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", "~", maxLen)
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
+}
+
+// XTrimMaxLen No `~` matcher is used and `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// XTrimMaxLenApprox Note: the LIMIT option is affected by a known redis-server bug; check the issue below and verify your server before using it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+// XTrimMinID No `~` matcher is used and `limit` cannot be used.
+// cmd: XTRIM key MINID minID
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+	return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+// XTrimMinIDApprox Note: the LIMIT option is affected by a known redis-server bug; check the issue below and verify your server before using it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MINID ~ minID LIMIT limit
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
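+// Examples (sketch, assuming a *Client named rdb and a context ctx) of the
+// trimming helpers above (MINID requires redis-server >= 6.2.0):
+//
+//	rdb.XTrimMaxLen(ctx, "mystream", 1000)                       // XTRIM mystream MAXLEN 1000
+//	rdb.XTrimMinIDApprox(ctx, "mystream", "1526919030474-0", 0)  // XTRIM mystream MINID ~ 1526919030474-0
+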
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+	cmd := NewXInfoConsumersCmd(ctx, key, group)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -1747,6 +2124,19 @@
 	return cmd
 }
 
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+	args := make([]interface{}, 0, 6)
+	args = append(args, "xinfo", "stream", key, "full")
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewXInfoStreamFullCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 //------------------------------------------------------------------------------
 
 // Z represents sorted set member.
@@ -1761,7 +2151,7 @@
 	Key string
 }
 
-// ZStore is used as an arg to ZInterStore and ZUnionStore.
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
 type ZStore struct {
 	Keys    []string
 	Weights []float64
@@ -1769,7 +2159,34 @@
 	Aggregate string
 }
 
-// Redis `BZPOPMAX key [key ...] timeout` command.
+func (z ZStore) len() (n int) {
+	n = len(z.Keys)
+	if len(z.Weights) > 0 {
+		n += 1 + len(z.Weights)
+	}
+	if z.Aggregate != "" {
+		n += 2
+	}
+	return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+	for _, key := range z.Keys {
+		args = append(args, key)
+	}
+	if len(z.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weights := range z.Weights {
+			args = append(args, weights)
+		}
+	}
+	if z.Aggregate != "" {
+		args = append(args, "aggregate", z.Aggregate)
+	}
+	return args
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
 func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
 	args := make([]interface{}, 1+len(keys)+1)
 	args[0] = "bzpopmax"
@@ -1783,7 +2200,7 @@
 	return cmd
 }
 
-// Redis `BZPOPMIN key [key ...] timeout` command.
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
 func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
 	args := make([]interface{}, 1+len(keys)+1)
 	args[0] = "bzpopmin"
@@ -1797,96 +2214,169 @@
 	return cmd
 }
 
-func (c cmdable) zAdd(ctx context.Context, a []interface{}, n int, members ...*Z) *IntCmd {
-	for i, m := range members {
-		a[n+2*i] = m.Score
-		a[n+2*i+1] = m.Member
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+	NX      bool
+	XX      bool
+	LT      bool
+	GT      bool
+	Ch      bool
+	Members []Z
+}
+
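+// Example (sketch, assuming a *Client named rdb and a context ctx): updating a
+// member only when the new score is greater and counting changed elements
+// (GT requires redis-server >= 6.2.0) could look like:
+//
+//	rdb.ZAddArgs(ctx, "myzset", ZAddArgs{
+//		GT:      true,
+//		Ch:      true,
+//		Members: []Z{{Score: 5, Member: "alice"}},
+//	})
+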
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+	a := make([]interface{}, 0, 6+2*len(args.Members))
+	a = append(a, "zadd", key)
+
+	// The GT, LT and NX options are mutually exclusive.
+	if args.NX {
+		a = append(a, "nx")
+	} else {
+		if args.XX {
+			a = append(a, "xx")
+		}
+		if args.GT {
+			a = append(a, "gt")
+		} else if args.LT {
+			a = append(a, "lt")
+		}
 	}
-	cmd := NewIntCmd(ctx, a...)
+	if args.Ch {
+		a = append(a, "ch")
+	}
+	if incr {
+		a = append(a, "incr")
+	}
+	for _, m := range args.Members {
+		a = append(a, m.Score)
+		a = append(a, m.Member)
+	}
+	return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+	cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+	cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// TODO: zAdd keeps compatibility with the v8 API; it will be removed in v9.
+func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
+	args.Members = make([]Z, len(members))
+	for i, m := range members {
+		args.Members[i] = *m
+	}
+	cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
 func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 2
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1] = "zadd", key
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{}, members...)
 }
 
-// Redis `ZADD key NX score member [score member ...]` command.
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
 func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 3
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2] = "zadd", key, "nx"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		NX: true,
+	}, members...)
 }
 
-// Redis `ZADD key XX score member [score member ...]` command.
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
 func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 3
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2] = "zadd", key, "xx"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		XX: true,
+	}, members...)
 }
 
-// Redis `ZADD key CH score member [score member ...]` command.
+// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
 func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 3
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2] = "zadd", key, "ch"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		Ch: true,
+	}, members...)
 }
 
-// Redis `ZADD key NX CH score member [score member ...]` command.
+// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			NX: true,
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
 func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 4
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		NX: true,
+		Ch: true,
+	}, members...)
 }
 
-// Redis `ZADD key XX CH score member [score member ...]` command.
+// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			XX: true,
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
 func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 4
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		XX: true,
+		Ch: true,
+	}, members...)
 }
 
-func (c cmdable) zIncr(ctx context.Context, a []interface{}, n int, members ...*Z) *FloatCmd {
-	for i, m := range members {
-		a[n+2*i] = m.Score
-		a[n+2*i+1] = m.Member
-	}
-	cmd := NewFloatCmd(ctx, a...)
-	_ = c(ctx, cmd)
-	return cmd
-}
-
-// Redis `ZADD key INCR score member` command.
+// ZIncr Redis `ZADD key INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			Members: []Z,
+//		})
+//	remove in v9.
 func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
-	const n = 3
-	a := make([]interface{}, n+2)
-	a[0], a[1], a[2] = "zadd", key, "incr"
-	return c.zIncr(ctx, a, n, member)
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		Members: []Z{*member},
+	})
 }
 
-// Redis `ZADD key NX INCR score member` command.
+// ZIncrNX Redis `ZADD key NX INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			NX: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
 func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
-	const n = 4
-	a := make([]interface{}, n+2)
-	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
-	return c.zIncr(ctx, a, n, member)
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		NX:      true,
+		Members: []Z{*member},
+	})
 }
 
-// Redis `ZADD key XX INCR score member` command.
+// ZIncrXX Redis `ZADD key XX INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			XX: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
 func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
-	const n = 4
-	a := make([]interface{}, n+2)
-	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
-	return c.zIncr(ctx, a, n, member)
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		XX:      true,
+		Members: []Z{*member},
+	})
 }
 
 func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
@@ -1914,24 +2404,44 @@
 }
 
 func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
-	args := make([]interface{}, 3+len(store.Keys))
-	args[0] = "zinterstore"
-	args[1] = destination
-	args[2] = len(store.Keys)
-	for i, key := range store.Keys {
-		args[3+i] = key
-	}
-	if len(store.Weights) > 0 {
-		args = append(args, "weights")
-		for _, weight := range store.Weights {
-			args = append(args, weight)
-		}
-	}
-	if store.Aggregate != "" {
-		args = append(args, "aggregate", store.Aggregate)
-	}
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zinterstore", destination, len(store.Keys))
+	args = store.appendArgs(args)
 	cmd := NewIntCmd(ctx, args...)
-	cmd.setFirstKeyPos(3)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+	args := make([]interface{}, 0, 2+store.len())
+	args = append(args, "zinter", len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zinter", len(store.Keys))
+	args = store.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
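+// Example (sketch, assuming a *Client named rdb and a context ctx): a weighted
+// intersection returning members together with their aggregated scores
+// (ZINTER requires redis-server >= 6.2.0) could look like:
+//
+//	rdb.ZInterWithScores(ctx, &ZStore{
+//		Keys:    []string{"zset1", "zset2"},
+//		Weights: []float64{1, 2},
+//	})
+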
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "zmscore"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewFloatSliceCmd(ctx, args...)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -1976,29 +2486,112 @@
 	return cmd
 }
 
-func (c cmdable) zRange(ctx context.Context, key string, start, stop int64, withScores bool) *StringSliceCmd {
-	args := []interface{}{
-		"zrange",
-		key,
-		start,
-		stop,
+// ZRangeArgs holds all the options of the ZRange command.
+// On redis-server >= 6.2.0 it can replace the following commands:
+//		ZREVRANGE,
+//		ZRANGEBYSCORE,
+//		ZREVRANGEBYSCORE,
+//		ZRANGEBYLEX,
+//		ZREVRANGEBYLEX.
+// Please pay attention to your redis-server version.
+//
+// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
+type ZRangeArgs struct {
+	Key string
+
+	// When the ByScore option is provided, an open (exclusive) interval can be set.
+	// By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+	// It is similar to the ZRangeByScore command, which is deprecated as of 6.2.0.
+	// For example:
+	//		ZRangeArgs{
+	//			Key: 				"example-key",
+	//	 		Start: 				"(3",
+	//	 		Stop: 				8,
+	//			ByScore:			true,
+	//	 	}
+	// 	 	cmd: "ZRange example-key (3 8 ByScore"  (3 < score <= 8).
+	//
+	// For the ByLex option, it is similar to the ZRangeByLex command, which is deprecated as of 6.2.0.
+	// You can set the <Start> and <Stop> options as follows:
+	//		ZRangeArgs{
+	//			Key: 				"example-key",
+	//	 		Start: 				"[abc",
+	//	 		Stop: 				"(def",
+	//			ByLex:				true,
+	//	 	}
+	//		cmd: "ZRange example-key [abc (def ByLex"
+	//
+	// For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+	// You can read the documentation for more information: https://redis.io/commands/zrange
+	Start interface{}
+	Stop  interface{}
+
+	// The ByScore and ByLex options are mutually exclusive.
+	ByScore bool
+	ByLex   bool
+
+	Rev bool
+
+	// limit offset count.
+	Offset int64
+	Count  int64
+}
+
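+// Example (sketch, assuming a *Client named rdb and a context ctx): the first
+// five members with a score above 3 and at most 8, highest score first, could
+// look like:
+//
+//	rdb.ZRangeArgsWithScores(ctx, ZRangeArgs{
+//		Key:     "example-key",
+//		Start:   "(3",
+//		Stop:    8,
+//		ByScore: true,
+//		Rev:     true,
+//		Count:   5,
+//	})
+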
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+	// For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+	if z.Rev && (z.ByScore || z.ByLex) {
+		args = append(args, z.Key, z.Stop, z.Start)
+	} else {
+		args = append(args, z.Key, z.Start, z.Stop)
 	}
-	if withScores {
-		args = append(args, "withscores")
+
+	if z.ByScore {
+		args = append(args, "byscore")
+	} else if z.ByLex {
+		args = append(args, "bylex")
 	}
+	if z.Rev {
+		args = append(args, "rev")
+	}
+	if z.Offset != 0 || z.Count != 0 {
+		args = append(args, "limit", z.Offset, z.Count)
+	}
+	return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
 	cmd := NewStringSliceCmd(ctx, args...)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
-	return c.zRange(ctx, key, start, stop, false)
+	return c.ZRangeArgs(ctx, ZRangeArgs{
+		Key:   key,
+		Start: start,
+		Stop:  stop,
+	})
 }
 
 func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
-	cmd := NewZSliceCmd(ctx, "zrange", key, start, stop, "withscores")
-	_ = c(ctx, cmd)
-	return cmd
+	return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+		Key:   key,
+		Start: start,
+		Stop:  stop,
+	})
 }
 
 type ZRangeBy struct {
@@ -2047,6 +2640,15 @@
 	return cmd
 }
 
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrangestore", dst)
+	args = z.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
 	cmd := NewIntCmd(ctx, "zrank", key, member)
 	_ = c(ctx, cmd)
@@ -2149,26 +2751,91 @@
 	return cmd
 }
 
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+	args := make([]interface{}, 0, 2+store.len())
+	args = append(args, "zunion", len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zunion", len(store.Keys))
+	args = store.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
-	args := make([]interface{}, 3+len(store.Keys))
-	args[0] = "zunionstore"
-	args[1] = dest
-	args[2] = len(store.Keys)
-	for i, key := range store.Keys {
-		args[3+i] = key
-	}
-	if len(store.Weights) > 0 {
-		args = append(args, "weights")
-		for _, weight := range store.Weights {
-			args = append(args, weight)
-		}
-	}
-	if store.Aggregate != "" {
-		args = append(args, "aggregate", store.Aggregate)
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zunionstore", dest, len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZRandMember redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "zrandmember", key, count)
+	if withScores {
+		args = append(args, "withscores")
 	}
 
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+	args[len(keys)+2] = "withscores"
+
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffStore redis-server version >=6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 0, 3+len(keys))
+	args = append(args, "zdiffstore", destination, len(keys))
+	for _, key := range keys {
+		args = append(args, key)
+	}
 	cmd := NewIntCmd(ctx, args...)
-	cmd.setFirstKeyPos(3)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -2395,7 +3062,7 @@
 	return cmd
 }
 
-func (c cmdable) Sync(ctx context.Context) {
+func (c cmdable) Sync(_ context.Context) {
 	panic("not implemented")
 }
 
@@ -2432,6 +3099,7 @@
 		args = append(args, "SAMPLES", samples[0])
 	}
 	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -2448,6 +3116,7 @@
 	}
 	cmdArgs = appendArgs(cmdArgs, args)
 	cmd := NewCmd(ctx, cmdArgs...)
+	cmd.SetFirstKeyPos(3)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -2462,6 +3131,7 @@
 	}
 	cmdArgs = appendArgs(cmdArgs, args)
 	cmd := NewCmd(ctx, cmdArgs...)
+	cmd.SetFirstKeyPos(3)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -2710,7 +3380,7 @@
 	return cmd
 }
 
-// GeoRadius is a read-only GEORADIUSBYMEMBER_RO command.
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
 func (c cmdable) GeoRadiusByMember(
 	ctx context.Context, key, member string, query *GeoRadiusQuery,
 ) *GeoLocationCmd {
@@ -2737,6 +3407,38 @@
 	return cmd
 }
 
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+	args := make([]interface{}, 0, 13)
+	args = append(args, "geosearch", key)
+	args = geoSearchArgs(q, args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+	ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+	args := make([]interface{}, 0, 16)
+	args = append(args, "geosearch", key)
+	args = geoSearchLocationArgs(q, args)
+	cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+	args := make([]interface{}, 0, 15)
+	args = append(args, "geosearchstore", store, key)
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+	if q.StoreDist {
+		args = append(args, "storedist")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) GeoDist(
 	ctx context.Context, key string, member1, member2, unit string,
 ) *FloatCmd {
diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go
index 9fe1376..521594b 100644
--- a/vendor/github.com/go-redis/redis/v8/error.go
+++ b/vendor/github.com/go-redis/redis/v8/error.go
@@ -10,6 +10,7 @@
 	"github.com/go-redis/redis/v8/internal/proto"
 )
 
+// ErrClosed is returned by any operation performed on a closed client.
 var ErrClosed = pool.ErrClosed
 
 type Error interface {
@@ -64,15 +65,28 @@
 	return ok
 }
 
-func isBadConn(err error, allowTimeout bool) bool {
-	if err == nil {
+func isBadConn(err error, allowTimeout bool, addr string) bool {
+	switch err {
+	case nil:
 		return false
+	case context.Canceled, context.DeadlineExceeded:
+		return true
 	}
 
 	if isRedisError(err) {
-		// Close connections in read only state in case domain addr is used
-		// and domain resolves to a different Redis Server. See #790.
-		return isReadOnlyError(err)
+		switch {
+		case isReadOnlyError(err):
+			// Close connections in read only state in case domain addr is used
+			// and domain resolves to a different Redis Server. See #790.
+			return true
+		case isMovedSameConnAddr(err, addr):
+			// Close connections when we are asked to move to the same addr
+			// of the connection. Force a DNS resolution when all connections
+			// of the pool are recycled
+			return true
+		default:
+			return false
+		}
 	}
 
 	if allowTimeout {
@@ -115,6 +129,14 @@
 	return strings.HasPrefix(err.Error(), "READONLY ")
 }
 
+func isMovedSameConnAddr(err error, addr string) bool {
+	redisError := err.Error()
+	if !strings.HasPrefix(redisError, "MOVED ") {
+		return false
+	}
+	return strings.HasSuffix(redisError, " "+addr)
+}
+
 //------------------------------------------------------------------------------
 
 type timeoutError interface {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
index 2fc74ad..b3a4f21 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
@@ -60,7 +60,7 @@
 	return rand.Intn(slotNumber)
 }
 
-// hashSlot returns a consistent slot number between 0 and 16383
+// Slot returns a consistent slot number between 0 and 16383
 // for any given string key.
 func Slot(key string) int {
 	if key == "" {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
new file mode 100644
index 0000000..852c8bd
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
@@ -0,0 +1,201 @@
+package hscan
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// decoderFunc represents decoding functions for default built-in types.
+type decoderFunc func(reflect.Value, string) error
+
+var (
+	// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
+	decoders = []decoderFunc{
+		reflect.Bool:          decodeBool,
+		reflect.Int:           decodeInt,
+		reflect.Int8:          decodeInt8,
+		reflect.Int16:         decodeInt16,
+		reflect.Int32:         decodeInt32,
+		reflect.Int64:         decodeInt64,
+		reflect.Uint:          decodeUint,
+		reflect.Uint8:         decodeUint8,
+		reflect.Uint16:        decodeUint16,
+		reflect.Uint32:        decodeUint32,
+		reflect.Uint64:        decodeUint64,
+		reflect.Float32:       decodeFloat32,
+		reflect.Float64:       decodeFloat64,
+		reflect.Complex64:     decodeUnsupported,
+		reflect.Complex128:    decodeUnsupported,
+		reflect.Array:         decodeUnsupported,
+		reflect.Chan:          decodeUnsupported,
+		reflect.Func:          decodeUnsupported,
+		reflect.Interface:     decodeUnsupported,
+		reflect.Map:           decodeUnsupported,
+		reflect.Ptr:           decodeUnsupported,
+		reflect.Slice:         decodeSlice,
+		reflect.String:        decodeString,
+		reflect.Struct:        decodeUnsupported,
+		reflect.UnsafePointer: decodeUnsupported,
+	}
+
+	// Global map of struct field specs that is populated once for every new
+	// struct type that is scanned. This caches the field types and the corresponding
+	// decoder functions to avoid iterating through struct fields on subsequent scans.
+	globalStructMap = newStructMap()
+)
+
+func Struct(dst interface{}) (StructValue, error) {
+	v := reflect.ValueOf(dst)
+
+	// The destination to scan into should be a struct pointer.
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
+	}
+
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
+	}
+
+	return StructValue{
+		spec:  globalStructMap.get(v.Type()),
+		value: v,
+	}, nil
+}
+
+// Scan scans the results from a key-value Redis map result set to a destination struct.
+// The Redis keys are matched to the struct's field with the `redis` tag.
+func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
+	if len(keys) != len(vals) {
+		return errors.New("args should have the same number of keys and vals")
+	}
+
+	strct, err := Struct(dst)
+	if err != nil {
+		return err
+	}
+
+	// Iterate through the (key, value) sequence.
+	for i := 0; i < len(vals); i++ {
+		key, ok := keys[i].(string)
+		if !ok {
+			continue
+		}
+
+		val, ok := vals[i].(string)
+		if !ok {
+			continue
+		}
+
+		if err := strct.Scan(key, val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
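+// Example (sketch): scanning the flattened field/value pairs of a Redis hash
+// into a struct whose fields carry `redis` tags could look like:
+//
+//	type person struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//	var p person
+//	err := Scan(&p, []interface{}{"name", "age"}, []interface{}{"ada", "36"})
+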
+func decodeBool(f reflect.Value, s string) error {
+	b, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	f.SetBool(b)
+	return nil
+}
+
+func decodeInt8(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 8)
+}
+
+func decodeInt16(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 16)
+}
+
+func decodeInt32(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 32)
+}
+
+func decodeInt64(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 64)
+}
+
+func decodeInt(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 0)
+}
+
+func decodeNumber(f reflect.Value, s string, bitSize int) error {
+	v, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return err
+	}
+	f.SetInt(v)
+	return nil
+}
+
+func decodeUint8(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 8)
+}
+
+func decodeUint16(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 16)
+}
+
+func decodeUint32(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 32)
+}
+
+func decodeUint64(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 64)
+}
+
+func decodeUint(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 0)
+}
+
+func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
+	v, err := strconv.ParseUint(s, 10, bitSize)
+	if err != nil {
+		return err
+	}
+	f.SetUint(v)
+	return nil
+}
+
+func decodeFloat32(f reflect.Value, s string) error {
+	v, err := strconv.ParseFloat(s, 32)
+	if err != nil {
+		return err
+	}
+	f.SetFloat(v)
+	return nil
+}
+
+// Although float64 is the default float type, we define the decoder explicitly.
+func decodeFloat64(f reflect.Value, s string) error {
+	v, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return err
+	}
+	f.SetFloat(v)
+	return nil
+}
+
+func decodeString(f reflect.Value, s string) error {
+	f.SetString(s)
+	return nil
+}
+
+func decodeSlice(f reflect.Value, s string) error {
+	// []byte slice ([]uint8).
+	if f.Type().Elem().Kind() == reflect.Uint8 {
+		f.SetBytes([]byte(s))
+	}
+	return nil
+}
+
+func decodeUnsupported(v reflect.Value, s string) error {
+	return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
new file mode 100644
index 0000000..6839412
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
@@ -0,0 +1,93 @@
+package hscan
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// structMap contains the map of struct fields for target structs
+// indexed by the struct type.
+type structMap struct {
+	m sync.Map
+}
+
+func newStructMap() *structMap {
+	return new(structMap)
+}
+
+func (s *structMap) get(t reflect.Type) *structSpec {
+	if v, ok := s.m.Load(t); ok {
+		return v.(*structSpec)
+	}
+
+	spec := newStructSpec(t, "redis")
+	s.m.Store(t, spec)
+	return spec
+}
+
+//------------------------------------------------------------------------------
+
+// structSpec contains the list of all fields in a target struct.
+type structSpec struct {
+	m map[string]*structField
+}
+
+func (s *structSpec) set(tag string, sf *structField) {
+	s.m[tag] = sf
+}
+
+func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
+	numField := t.NumField()
+	out := &structSpec{
+		m: make(map[string]*structField, numField),
+	}
+
+	for i := 0; i < numField; i++ {
+		f := t.Field(i)
+
+		tag := f.Tag.Get(fieldTag)
+		if tag == "" || tag == "-" {
+			continue
+		}
+
+		tag = strings.Split(tag, ",")[0]
+		if tag == "" {
+			continue
+		}
+
+		// Use the built-in decoder.
+		out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+	}
+
+	return out
+}
+
+//------------------------------------------------------------------------------
+
+// structField represents a single field in a target struct.
+type structField struct {
+	index int
+	fn    decoderFunc
+}
+
+//------------------------------------------------------------------------------
+
+type StructValue struct {
+	spec  *structSpec
+	value reflect.Value
+}
+
+func (s StructValue) Scan(key string, value string) error {
+	field, ok := s.spec.m[key]
+	if !ok {
+		return nil
+	}
+	if err := field.fn(s.value.Field(field.index), value); err != nil {
+		t := s.value.Type()
+		return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
+			value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/instruments.go b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
deleted file mode 100644
index e837526..0000000
--- a/vendor/github.com/go-redis/redis/v8/internal/instruments.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package internal
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/api/global"
-	"go.opentelemetry.io/otel/api/metric"
-)
-
-var (
-	// WritesCounter is a count of write commands performed.
-	WritesCounter metric.Int64Counter
-	// NewConnectionsCounter is a count of new connections.
-	NewConnectionsCounter metric.Int64Counter
-)
-
-func init() {
-	defer func() {
-		if r := recover(); r != nil {
-			Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments", r)
-		}
-	}()
-
-	meter := metric.Must(global.Meter("github.com/go-redis/redis"))
-
-	WritesCounter = meter.NewInt64Counter("redis.writes",
-		metric.WithDescription("the number of writes initiated"),
-	)
-
-	NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections",
-		metric.WithDescription("the number of connections created"),
-	)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
index 3810f9e..c8b9213 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/log.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/log.go
@@ -19,6 +19,8 @@
 	_ = l.log.Output(2, fmt.Sprintf(format, v...))
 }
 
+// Logger calls Output to print to stderr.
+// Arguments are handled in the manner of fmt.Print.
 var Logger Logging = &logger{
 	log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
 }
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
index 08a2071..5661659 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
@@ -7,9 +7,7 @@
 	"sync/atomic"
 	"time"
 
-	"github.com/go-redis/redis/v8/internal"
 	"github.com/go-redis/redis/v8/internal/proto"
-	"go.opentelemetry.io/otel/api/trace"
 )
 
 var noDeadline = time.Time{}
@@ -66,41 +64,28 @@
 }
 
 func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
-	return internal.WithSpan(ctx, "redis.with_reader", func(ctx context.Context, span trace.Span) error {
-		if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
-			return internal.RecordError(ctx, err)
-		}
-		if err := fn(cn.rd); err != nil {
-			return internal.RecordError(ctx, err)
-		}
-		return nil
-	})
+	if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+		return err
+	}
+	return fn(cn.rd)
 }
 
 func (cn *Conn) WithWriter(
 	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
 ) error {
-	return internal.WithSpan(ctx, "redis.with_writer", func(ctx context.Context, span trace.Span) error {
-		if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
-			return internal.RecordError(ctx, err)
-		}
+	if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+		return err
+	}
 
-		if cn.bw.Buffered() > 0 {
-			cn.bw.Reset(cn.netConn)
-		}
+	if cn.bw.Buffered() > 0 {
+		cn.bw.Reset(cn.netConn)
+	}
 
-		if err := fn(cn.wr); err != nil {
-			return internal.RecordError(ctx, err)
-		}
+	if err := fn(cn.wr); err != nil {
+		return err
+	}
 
-		if err := cn.bw.Flush(); err != nil {
-			return internal.RecordError(ctx, err)
-		}
-
-		internal.WritesCounter.Add(ctx, 1)
-
-		return nil
-	})
+	return cn.bw.Flush()
 }
 
 func (cn *Conn) Close() error {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
index 355742b..44a4e77 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
@@ -12,7 +12,10 @@
 )
 
 var (
-	ErrClosed      = errors.New("redis: client is closed")
+	// ErrClosed is returned by any operation performed on a closed client.
+	ErrClosed = errors.New("redis: client is closed")
+
+	// ErrPoolTimeout is returned when the client timed out waiting to get a connection from the connection pool.
 	ErrPoolTimeout = errors.New("redis: connection pool timeout")
 )
 
@@ -54,6 +57,7 @@
 	Dialer  func(context.Context) (net.Conn, error)
 	OnClose func(*Conn) error
 
+	PoolFIFO           bool
 	PoolSize           int
 	MinIdleConns       int
 	MaxConnAge         time.Duration
@@ -117,9 +121,10 @@
 	for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
 		p.poolSize++
 		p.idleConnsLen++
+
 		go func() {
 			err := p.addIdleConn()
-			if err != nil {
+			if err != nil && err != ErrClosed {
 				p.connsMu.Lock()
 				p.poolSize--
 				p.idleConnsLen--
@@ -136,9 +141,16 @@
 	}
 
 	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	// It is not allowed to add new connections to the closed connection pool.
+	if p.closed() {
+		_ = cn.Close()
+		return ErrClosed
+	}
+
 	p.conns = append(p.conns, cn)
 	p.idleConns = append(p.idleConns, cn)
-	p.connsMu.Unlock()
 	return nil
 }
 
@@ -153,6 +165,14 @@
 	}
 
 	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	// It is not allowed to add new connections to the closed connection pool.
+	if p.closed() {
+		_ = cn.Close()
+		return nil, ErrClosed
+	}
+
 	p.conns = append(p.conns, cn)
 	if pooled {
 		// If pool is full remove the cn on next Put.
@@ -162,7 +182,6 @@
 			p.poolSize++
 		}
 	}
-	p.connsMu.Unlock()
 
 	return cn, nil
 }
@@ -185,7 +204,6 @@
 		return nil, err
 	}
 
-	internal.NewConnectionsCounter.Add(ctx, 1)
 	cn := NewConn(netConn)
 	cn.pooled = pooled
 	return cn, nil
@@ -228,16 +246,19 @@
 		return nil, ErrClosed
 	}
 
-	err := p.waitTurn(ctx)
-	if err != nil {
+	if err := p.waitTurn(ctx); err != nil {
 		return nil, err
 	}
 
 	for {
 		p.connsMu.Lock()
-		cn := p.popIdle()
+		cn, err := p.popIdle()
 		p.connsMu.Unlock()
 
+		if err != nil {
+			return nil, err
+		}
+
 		if cn == nil {
 			break
 		}
@@ -306,17 +327,28 @@
 	<-p.queue
 }
 
-func (p *ConnPool) popIdle() *Conn {
-	if len(p.idleConns) == 0 {
-		return nil
+func (p *ConnPool) popIdle() (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+	n := len(p.idleConns)
+	if n == 0 {
+		return nil, nil
 	}
 
-	idx := len(p.idleConns) - 1
-	cn := p.idleConns[idx]
-	p.idleConns = p.idleConns[:idx]
+	var cn *Conn
+	if p.opt.PoolFIFO {
+		cn = p.idleConns[0]
+		copy(p.idleConns, p.idleConns[1:])
+		p.idleConns = p.idleConns[:n-1]
+	} else {
+		idx := n - 1
+		cn = p.idleConns[idx]
+		p.idleConns = p.idleConns[:idx]
+	}
 	p.idleConnsLen--
 	p.checkMinIdleConns()
-	return cn
+	return cn, nil
 }
 
 func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
@@ -477,6 +509,7 @@
 		p.connsMu.Lock()
 		cn := p.reapStaleConn()
 		p.connsMu.Unlock()
+
 		p.freeTurn()
 
 		if cn != nil {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
index c3e7e7c..3adb99b 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
@@ -172,8 +172,7 @@
 
 func (p *StickyConnPool) badConnError() error {
 	if v := p._badConnError.Load(); v != nil {
-		err := v.(BadConnError)
-		if err.wrapped != nil {
+		if err := v.(BadConnError); err.wrapped != nil {
 			return err
 		}
 	}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
index 0fbc51e..0e6ca77 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
@@ -8,6 +8,7 @@
 	"github.com/go-redis/redis/v8/internal/util"
 )
 
+// Redis RESP protocol data types.
 const (
 	ErrorReply  = '-'
 	StatusReply = '+'
@@ -18,7 +19,7 @@
 
 //------------------------------------------------------------------------------
 
-const Nil = RedisError("redis: nil")
+const Nil = RedisError("redis: nil") // nolint:errname
 
 type RedisError string
 
@@ -83,7 +84,7 @@
 			return nil, err
 		}
 
-		full = append(full, b...)
+		full = append(full, b...) //nolint:makezero
 		b = full
 	}
 	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
index 08d18d3..0e99476 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
@@ -10,7 +10,7 @@
 )
 
 // Scan parses bytes `b` to `v` with appropriate type.
-// nolint: gocyclo
+//nolint:gocyclo
 func Scan(b []byte, v interface{}) error {
 	switch v := v.(type) {
 	case nil:
@@ -106,6 +106,13 @@
 		var err error
 		*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
 		return err
+	case *time.Duration:
+		n, err := util.ParseInt(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = time.Duration(n)
+		return nil
 	case encoding.BinaryUnmarshaler:
 		return v.UnmarshalBinary(b)
 	default:
@@ -131,7 +138,7 @@
 	for i, s := range data {
 		elem := next()
 		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
-			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
 			return err
 		}
 	}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
index 81b09b8..c426098 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
@@ -98,6 +98,8 @@
 	case time.Time:
 		w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
 		return w.bytes(w.numBuf)
+	case time.Duration:
+		return w.int(v.Nanoseconds())
 	case encoding.BinaryMarshaler:
 		b, err := v.MarshalBinary()
 		if err != nil {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
index 40676f3..2edccba 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
@@ -43,3 +43,8 @@
 	s.src.Seed(seed)
 	s.mu.Unlock()
 }
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
index 862ff0e..fd2f434 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/safe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
 // +build appengine
 
 package internal
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
index 4bc7970..9f2e418 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
 // +build !appengine
 
 package internal
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
index 894382b..e34a7f0 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util.go
@@ -4,24 +4,19 @@
 	"context"
 	"time"
 
-	"github.com/go-redis/redis/v8/internal/proto"
 	"github.com/go-redis/redis/v8/internal/util"
-	"go.opentelemetry.io/otel/api/global"
-	"go.opentelemetry.io/otel/api/trace"
 )
 
 func Sleep(ctx context.Context, dur time.Duration) error {
-	return WithSpan(ctx, "time.Sleep", func(ctx context.Context, span trace.Span) error {
-		t := time.NewTimer(dur)
-		defer t.Stop()
+	t := time.NewTimer(dur)
+	defer t.Stop()
 
-		select {
-		case <-t.C:
-			return nil
-		case <-ctx.Done():
-			return ctx.Err()
-		}
-	})
+	select {
+	case <-t.C:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
 }
 
 func ToLower(s string) string {
@@ -49,33 +44,3 @@
 	}
 	return true
 }
-
-func Unwrap(err error) error {
-	u, ok := err.(interface {
-		Unwrap() error
-	})
-	if !ok {
-		return nil
-	}
-	return u.Unwrap()
-}
-
-//------------------------------------------------------------------------------
-
-func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
-	if span := trace.SpanFromContext(ctx); !span.IsRecording() {
-		return fn(ctx, span)
-	}
-
-	ctx, span := global.Tracer("github.com/go-redis/redis").Start(ctx, name)
-	defer span.End()
-
-	return fn(ctx, span)
-}
-
-func RecordError(ctx context.Context, err error) error {
-	if err != proto.Nil {
-		trace.SpanFromContext(ctx).RecordError(ctx, err)
-	}
-	return err
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
index 1b3060e..2130711 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
 // +build appengine
 
 package util
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
index c9868aa..daa8d76 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
 // +build !appengine
 
 package util
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
index f2c16c5..a4abe32 100644
--- a/vendor/github.com/go-redis/redis/v8/options.go
+++ b/vendor/github.com/go-redis/redis/v8/options.go
@@ -8,14 +8,12 @@
 	"net"
 	"net/url"
 	"runtime"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-redis/redis/v8/internal"
 	"github.com/go-redis/redis/v8/internal/pool"
-	"go.opentelemetry.io/otel/api/trace"
-	"go.opentelemetry.io/otel/label"
 )
 
 // Limiter is the interface of a rate limiter or a circuit breaker.
@@ -58,7 +56,7 @@
 	DB int
 
 	// Maximum number of retries before giving up.
-	// Default is 3 retries.
+	// Default is 3 retries; -1 (not 0) disables retries.
 	MaxRetries int
 	// Minimum backoff between each retry.
 	// Default is 8 milliseconds; -1 disables backoff.
@@ -79,8 +77,12 @@
 	// Default is ReadTimeout.
 	WriteTimeout time.Duration
 
+	// Type of connection pool.
+	// true for FIFO pool, false for LIFO pool.
+	// Note that fifo has higher overhead compared to lifo.
+	PoolFIFO bool
 	// Maximum number of socket connections.
-	// Default is 10 connections per every CPU as reported by runtime.NumCPU.
+	// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
 	PoolSize int
 	// Minimum number of idle connections which is useful when establishing
 	// new connection is slow.
@@ -139,7 +141,7 @@
 		}
 	}
 	if opt.PoolSize == 0 {
-		opt.PoolSize = 10 * runtime.NumCPU()
+		opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
 	}
 	switch opt.ReadTimeout {
 	case -1:
@@ -191,9 +193,32 @@
 // Scheme is required.
 // There are two connection types: by tcp socket and by unix socket.
 // Tcp connection:
-// 		redis://<user>:<password>@<host>:<port>/<db_number>
+//		redis://<user>:<password>@<host>:<port>/<db_number>
 // Unix connection:
 //		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+// Most Option fields can be set using query parameters, with the following restrictions:
+//	- field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+//	- only scalar type fields are supported (bool, int, time.Duration)
+//	- for time.Duration fields, values must be a valid input for time.ParseDuration();
+//	  additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+//	- to disable a duration field, use a value less than or equal to 0; to use the default
+//	  value, leave the value blank or remove the parameter
+//	- only the last value is interpreted if a parameter is given multiple times
+//	- fields "network", "addr", "username" and "password" can only be set using other
+//	  URL attributes (scheme, host, userinfo, resp.); query parameters using these
+//	  names will be treated as unknown parameters
+//	- unknown parameter names will result in an error
+// Examples:
+//		redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+//		is equivalent to:
+//		&Options{
+//			Network:     "tcp",
+//			Addr:        "localhost:6789",
+//			DB:          1,               // path "/3" was overridden by "&db=1"
+//			DialTimeout: 3 * time.Second, // no time unit = seconds
+//			ReadTimeout: 6 * time.Second,
+//			MaxRetries:  2,
+//		}
 func ParseURL(redisURL string) (*Options, error) {
 	u, err := url.Parse(redisURL)
 	if err != nil {
@@ -215,10 +240,6 @@
 
 	o.Username, o.Password = getUserPassword(u)
 
-	if len(u.Query()) > 0 {
-		return nil, errors.New("redis: no options supported")
-	}
-
 	h, p, err := net.SplitHostPort(u.Host)
 	if err != nil {
 		h = u.Host
@@ -249,7 +270,7 @@
 		o.TLSConfig = &tls.Config{ServerName: h}
 	}
 
-	return o, nil
+	return setupConnParams(u, o)
 }
 
 func setupUnixConn(u *url.URL) (*Options, error) {
@@ -261,19 +282,122 @@
 		return nil, errors.New("redis: empty unix socket path")
 	}
 	o.Addr = u.Path
-
 	o.Username, o.Password = getUserPassword(u)
+	return setupConnParams(u, o)
+}
 
-	dbStr := u.Query().Get("db")
-	if dbStr == "" {
-		return o, nil // if database is not set, connect to 0 db.
+type queryOptions struct {
+	q   url.Values
+	err error
+}
+
+func (o *queryOptions) string(name string) string {
+	vs := o.q[name]
+	if len(vs) == 0 {
+		return ""
+	}
+	delete(o.q, name) // enable detection of unknown parameters
+	return vs[len(vs)-1]
+}
+
+func (o *queryOptions) int(name string) int {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	i, err := strconv.Atoi(s)
+	if err == nil {
+		return i
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	// try plain number first
+	if i, err := strconv.Atoi(s); err == nil {
+		if i <= 0 {
+			// disable timeouts
+			return -1
+		}
+		return time.Duration(i) * time.Second
+	}
+	dur, err := time.ParseDuration(s)
+	if err == nil {
+		return dur
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+	switch s := o.string(name); s {
+	case "true", "1":
+		return true
+	case "false", "0", "":
+		return false
+	default:
+		if o.err == nil {
+			o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+		}
+		return false
+	}
+}
+
+func (o *queryOptions) remaining() []string {
+	if len(o.q) == 0 {
+		return nil
+	}
+	keys := make([]string, 0, len(o.q))
+	for k := range o.q {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// setupConnParams converts query parameters in u to option values in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+	q := queryOptions{q: u.Query()}
+
+	// compat: a future major release may use q.int("db")
+	if tmp := q.string("db"); tmp != "" {
+		db, err := strconv.Atoi(tmp)
+		if err != nil {
+			return nil, fmt.Errorf("redis: invalid database number: %w", err)
+		}
+		o.DB = db
 	}
 
-	db, err := strconv.Atoi(dbStr)
-	if err != nil {
-		return nil, fmt.Errorf("redis: invalid database number: %s", err)
+	o.MaxRetries = q.int("max_retries")
+	o.MinRetryBackoff = q.duration("min_retry_backoff")
+	o.MaxRetryBackoff = q.duration("max_retry_backoff")
+	o.DialTimeout = q.duration("dial_timeout")
+	o.ReadTimeout = q.duration("read_timeout")
+	o.WriteTimeout = q.duration("write_timeout")
+	o.PoolFIFO = q.bool("pool_fifo")
+	o.PoolSize = q.int("pool_size")
+	o.MinIdleConns = q.int("min_idle_conns")
+	o.MaxConnAge = q.duration("max_conn_age")
+	o.PoolTimeout = q.duration("pool_timeout")
+	o.IdleTimeout = q.duration("idle_timeout")
+	o.IdleCheckFrequency = q.duration("idle_check_frequency")
+	if q.err != nil {
+		return nil, q.err
 	}
-	o.DB = db
+
+	// any parameters left?
+	if r := q.remaining(); len(r) > 0 {
+		return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+	}
 
 	return o, nil
 }
@@ -292,21 +416,9 @@
 func newConnPool(opt *Options) *pool.ConnPool {
 	return pool.NewConnPool(&pool.Options{
 		Dialer: func(ctx context.Context) (net.Conn, error) {
-			var conn net.Conn
-			err := internal.WithSpan(ctx, "redis.dial", func(ctx context.Context, span trace.Span) error {
-				span.SetAttributes(
-					label.String("db.connection_string", opt.Addr),
-				)
-
-				var err error
-				conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
-				if err != nil {
-					_ = internal.RecordError(ctx, err)
-				}
-				return err
-			})
-			return conn, err
+			return opt.Dialer(ctx, opt.Network, opt.Addr)
 		},
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		MinIdleConns:       opt.MinIdleConns,
 		MaxConnAge:         opt.MaxConnAge,
diff --git a/vendor/github.com/go-redis/redis/v8/package.json b/vendor/github.com/go-redis/redis/v8/package.json
new file mode 100644
index 0000000..e4ea4bb
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/package.json
@@ -0,0 +1,8 @@
+{
+  "name": "redis",
+  "version": "8.11.5",
+  "main": "index.js",
+  "repository": "git@github.com:go-redis/redis.git",
+  "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
+  "license": "BSD-2-clause"
+}
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
index c6ec340..31bab97 100644
--- a/vendor/github.com/go-redis/redis/v8/pipeline.go
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -24,6 +24,7 @@
 // depends of your batch size and/or use TxPipeline.
 type Pipeliner interface {
 	StatefulCmdable
+	Len() int
 	Do(ctx context.Context, args ...interface{}) *Cmd
 	Process(ctx context.Context, cmd Cmder) error
 	Close() error
@@ -53,6 +54,15 @@
 	c.statefulCmdable = c.Process
 }
 
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+	c.mu.Lock()
+	ln := len(c.cmds)
+	c.mu.Unlock()
+	return ln
+}
+
+// Do queues the custom command for later execution.
 func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
 	cmd := NewCmd(ctx, args...)
 	_ = c.Process(ctx, cmd)
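A short sketch of the new Pipeliner.Len method added above; the address and key names are hypothetical:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // hypothetical address
	defer client.Close()

	pipe := client.Pipeline()
	pipe.Set(ctx, "a", 1, 0)
	pipe.Incr(ctx, "counter")

	// Len reports how many commands are currently queued on the pipeline.
	fmt.Println("queued commands:", pipe.Len()) // 2

	if _, err := pipe.Exec(ctx); err != nil {
		panic(err)
	}
}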
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
index c56270b..efc2354 100644
--- a/vendor/github.com/go-redis/redis/v8/pubsub.go
+++ b/vendor/github.com/go-redis/redis/v8/pubsub.go
@@ -2,7 +2,6 @@
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"strings"
 	"sync"
@@ -13,13 +12,6 @@
 	"github.com/go-redis/redis/v8/internal/proto"
 )
 
-const (
-	pingTimeout     = time.Second
-	chanSendTimeout = time.Minute
-)
-
-var errPingTimeout = errors.New("redis: ping timeout")
-
 // PubSub implements Pub/Sub commands as described in
 // http://redis.io/topics/pubsub. Message receiving is NOT safe
 // for concurrent use by multiple goroutines.
@@ -43,9 +35,12 @@
 	cmd *Cmd
 
 	chOnce sync.Once
-	msgCh  chan *Message
-	allCh  chan interface{}
-	ping   chan struct{}
+	msgCh  *channel
+	allCh  *channel
+}
+
+func (c *PubSub) init() {
+	c.exit = make(chan struct{})
 }
 
 func (c *PubSub) String() string {
@@ -54,10 +49,6 @@
 	return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
 }
 
-func (c *PubSub) init() {
-	c.exit = make(chan struct{})
-}
-
 func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
 	c.mu.Lock()
 	cn, err := c.conn(ctx, nil)
@@ -150,7 +141,7 @@
 	if c.cn != cn {
 		return
 	}
-	if isBadConn(err, allowTimeout) {
+	if isBadConn(err, allowTimeout, c.opt.Addr) {
 		c.reconnect(ctx, err)
 	}
 }
@@ -261,13 +252,16 @@
 	}
 	cmd := NewCmd(ctx, args...)
 
-	cn, err := c.connWithLock(ctx)
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	cn, err := c.conn(ctx, nil)
 	if err != nil {
 		return err
 	}
 
 	err = c.writeCmd(ctx, cn, cmd)
-	c.releaseConnWithLock(ctx, cn, err, false)
+	c.releaseConn(ctx, cn, err, false)
 	return err
 }
 
@@ -370,6 +364,8 @@
 		c.cmd = NewCmd(ctx)
 	}
 
+	// Don't hold the lock to allow subscriptions and pings.
+
 	cn, err := c.connWithLock(ctx)
 	if err != nil {
 		return nil, err
@@ -380,6 +376,7 @@
 	})
 
 	c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
 	if err != nil {
 		return nil, err
 	}
@@ -418,56 +415,6 @@
 	}
 }
 
-// Channel returns a Go channel for concurrently receiving messages.
-// The channel is closed together with the PubSub. If the Go channel
-// is blocked full for 30 seconds the message is dropped.
-// Receive* APIs can not be used after channel is created.
-//
-// go-redis periodically sends ping messages to test connection health
-// and re-subscribes if ping can not not received for 30 seconds.
-func (c *PubSub) Channel() <-chan *Message {
-	return c.ChannelSize(100)
-}
-
-// ChannelSize is like Channel, but creates a Go channel
-// with specified buffer size.
-func (c *PubSub) ChannelSize(size int) <-chan *Message {
-	c.chOnce.Do(func() {
-		c.initPing()
-		c.initMsgChan(size)
-	})
-	if c.msgCh == nil {
-		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
-		panic(err)
-	}
-	if cap(c.msgCh) != size {
-		err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
-		panic(err)
-	}
-	return c.msgCh
-}
-
-// ChannelWithSubscriptions is like Channel, but message type can be either
-// *Subscription or *Message. Subscription messages can be used to detect
-// reconnections.
-//
-// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
-	c.chOnce.Do(func() {
-		c.initPing()
-		c.initAllChan(size)
-	})
-	if c.allCh == nil {
-		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
-		panic(err)
-	}
-	if cap(c.allCh) != size {
-		err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
-		panic(err)
-	}
-	return c.allCh
-}
-
 func (c *PubSub) getContext() context.Context {
 	if c.cmd != nil {
 		return c.cmd.ctx
@@ -475,36 +422,135 @@
 	return context.Background()
 }
 
-func (c *PubSub) initPing() {
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// is blocked full for the send timeout (60 seconds by default), the message is dropped.
+// Receive* APIs cannot be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and reconnects if a ping fails.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+	c.chOnce.Do(func() {
+		c.msgCh = newChannel(c, opts...)
+		c.msgCh.initMsgChan()
+	})
+	if c.msgCh == nil {
+		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+		panic(err)
+	}
+	return c.msgCh.msgCh
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+//
+// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+	return c.Channel(WithChannelSize(size))
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+	c.chOnce.Do(func() {
+		c.allCh = newChannel(c, WithChannelSize(size))
+		c.allCh.initAllChan()
+	})
+	if c.allCh == nil {
+		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+		panic(err)
+	}
+	return c.allCh.allCh
+}
+
+type ChannelOption func(c *channel)
+
+// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
+//
+// The default is 100 messages.
+func WithChannelSize(size int) ChannelOption {
+	return func(c *channel) {
+		c.chanSize = size
+	}
+}
+
+// WithChannelHealthCheckInterval specifies the health check interval.
+// PubSub will ping the Redis server if it does not receive any messages within the interval.
+// To disable the health check, use a zero interval.
+//
+// The default is 3 seconds.
+func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
+	return func(c *channel) {
+		c.checkInterval = d
+	}
+}
+
+// WithChannelSendTimeout specifies the channel send timeout after which
+// the message is dropped.
+//
+// The default is 60 seconds.
+func WithChannelSendTimeout(d time.Duration) ChannelOption {
+	return func(c *channel) {
+		c.chanSendTimeout = d
+	}
+}
+
+type channel struct {
+	pubSub *PubSub
+
+	msgCh chan *Message
+	allCh chan interface{}
+	ping  chan struct{}
+
+	chanSize        int
+	chanSendTimeout time.Duration
+	checkInterval   time.Duration
+}
+
+func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
+	c := &channel{
+		pubSub: pubSub,
+
+		chanSize:        100,
+		chanSendTimeout: time.Minute,
+		checkInterval:   3 * time.Second,
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.checkInterval > 0 {
+		c.initHealthCheck()
+	}
+	return c
+}
+
+func (c *channel) initHealthCheck() {
 	ctx := context.TODO()
 	c.ping = make(chan struct{}, 1)
+
 	go func() {
 		timer := time.NewTimer(time.Minute)
 		timer.Stop()
 
-		healthy := true
 		for {
-			timer.Reset(pingTimeout)
+			timer.Reset(c.checkInterval)
 			select {
 			case <-c.ping:
-				healthy = true
 				if !timer.Stop() {
 					<-timer.C
 				}
 			case <-timer.C:
-				pingErr := c.Ping(ctx)
-				if healthy {
-					healthy = false
-				} else {
-					if pingErr == nil {
-						pingErr = errPingTimeout
-					}
-					c.mu.Lock()
-					c.reconnect(ctx, pingErr)
-					healthy = true
-					c.mu.Unlock()
+				if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
+					c.pubSub.mu.Lock()
+					c.pubSub.reconnect(ctx, pingErr)
+					c.pubSub.mu.Unlock()
 				}
-			case <-c.exit:
+			case <-c.pubSub.exit:
 				return
 			}
 		}
@@ -512,16 +558,17 @@
 }
 
 // initMsgChan must be in sync with initAllChan.
-func (c *PubSub) initMsgChan(size int) {
+func (c *channel) initMsgChan() {
 	ctx := context.TODO()
-	c.msgCh = make(chan *Message, size)
+	c.msgCh = make(chan *Message, c.chanSize)
+
 	go func() {
 		timer := time.NewTimer(time.Minute)
 		timer.Stop()
 
 		var errCount int
 		for {
-			msg, err := c.Receive(ctx)
+			msg, err := c.pubSub.Receive(ctx)
 			if err != nil {
 				if err == pool.ErrClosed {
 					close(c.msgCh)
@@ -548,7 +595,7 @@
 			case *Pong:
 				// Ignore.
 			case *Message:
-				timer.Reset(chanSendTimeout)
+				timer.Reset(c.chanSendTimeout)
 				select {
 				case c.msgCh <- msg:
 					if !timer.Stop() {
@@ -556,30 +603,28 @@
 					}
 				case <-timer.C:
 					internal.Logger.Printf(
-						c.getContext(),
-						"redis: %s channel is full for %s (message is dropped)",
-						c,
-						chanSendTimeout,
-					)
+						ctx, "redis: %s channel is full for %s (message is dropped)",
+						c, c.chanSendTimeout)
 				}
 			default:
-				internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
 			}
 		}
 	}()
 }
 
 // initAllChan must be in sync with initMsgChan.
-func (c *PubSub) initAllChan(size int) {
+func (c *channel) initAllChan() {
 	ctx := context.TODO()
-	c.allCh = make(chan interface{}, size)
+	c.allCh = make(chan interface{}, c.chanSize)
+
 	go func() {
-		timer := time.NewTimer(pingTimeout)
+		timer := time.NewTimer(time.Minute)
 		timer.Stop()
 
 		var errCount int
 		for {
-			msg, err := c.Receive(ctx)
+			msg, err := c.pubSub.Receive(ctx)
 			if err != nil {
 				if err == pool.ErrClosed {
 					close(c.allCh)
@@ -601,29 +646,23 @@
 			}
 
 			switch msg := msg.(type) {
-			case *Subscription:
-				c.sendMessage(msg, timer)
 			case *Pong:
 				// Ignore.
-			case *Message:
-				c.sendMessage(msg, timer)
+			case *Subscription, *Message:
+				timer.Reset(c.chanSendTimeout)
+				select {
+				case c.allCh <- msg:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					internal.Logger.Printf(
+						ctx, "redis: %s channel is full for %s (message is dropped)",
+						c, c.chanSendTimeout)
+				}
 			default:
-				internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
 			}
 		}
 	}()
 }
-
-func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
-	timer.Reset(pingTimeout)
-	select {
-	case c.allCh <- msg:
-		if !timer.Stop() {
-			<-timer.C
-		}
-	case <-timer.C:
-		internal.Logger.Printf(
-			c.getContext(),
-			"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
-	}
-}
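A minimal sketch of the new ChannelOption API introduced above (WithChannelSize, WithChannelHealthCheckInterval, WithChannelSendTimeout); the address and channel name are hypothetical:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // hypothetical address
	defer client.Close()

	sub := client.Subscribe(ctx, "mychannel") // hypothetical channel name
	defer sub.Close()

	// Tune the buffered channel and the background health check goroutine.
	ch := sub.Channel(
		redis.WithChannelSize(500),
		redis.WithChannelHealthCheckInterval(10*time.Second),
		redis.WithChannelSendTimeout(30*time.Second),
	)

	for msg := range ch {
		fmt.Println(msg.Channel, msg.Payload)
	}
}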
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
index efad7f1..bcf8a2a 100644
--- a/vendor/github.com/go-redis/redis/v8/redis.go
+++ b/vendor/github.com/go-redis/redis/v8/redis.go
@@ -2,14 +2,14 @@
 
 import (
 	"context"
+	"errors"
 	"fmt"
+	"sync/atomic"
 	"time"
 
 	"github.com/go-redis/redis/v8/internal"
 	"github.com/go-redis/redis/v8/internal/pool"
 	"github.com/go-redis/redis/v8/internal/proto"
-	"go.opentelemetry.io/otel/api/trace"
-	"go.opentelemetry.io/otel/label"
 )
 
 // Nil reply returned by Redis when key does not exist.
@@ -51,9 +51,7 @@
 	ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
 ) error {
 	if len(hs.hooks) == 0 {
-		err := hs.withContext(ctx, func() error {
-			return fn(ctx, cmd)
-		})
+		err := fn(ctx, cmd)
 		cmd.SetErr(err)
 		return err
 	}
@@ -69,9 +67,7 @@
 	}
 
 	if retErr == nil {
-		retErr = hs.withContext(ctx, func() error {
-			return fn(ctx, cmd)
-		})
+		retErr = fn(ctx, cmd)
 		cmd.SetErr(retErr)
 	}
 
@@ -89,9 +85,7 @@
 	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
 ) error {
 	if len(hs.hooks) == 0 {
-		err := hs.withContext(ctx, func() error {
-			return fn(ctx, cmds)
-		})
+		err := fn(ctx, cmds)
 		return err
 	}
 
@@ -106,9 +100,7 @@
 	}
 
 	if retErr == nil {
-		retErr = hs.withContext(ctx, func() error {
-			return fn(ctx, cmds)
-		})
+		retErr = fn(ctx, cmds)
 	}
 
 	for hookIndex--; hookIndex >= 0; hookIndex-- {
@@ -128,23 +120,6 @@
 	return hs.processPipeline(ctx, cmds, fn)
 }
 
-func (hs hooks) withContext(ctx context.Context, fn func() error) error {
-	done := ctx.Done()
-	if done == nil {
-		return fn()
-	}
-
-	errc := make(chan error, 1)
-	go func() { errc <- fn() }()
-
-	select {
-	case <-done:
-		return ctx.Err()
-	case err := <-errc:
-		return err
-	}
-}
-
 //------------------------------------------------------------------------------
 
 type baseClient struct {
@@ -225,12 +200,9 @@
 		return cn, nil
 	}
 
-	err = internal.WithSpan(ctx, "redis.init_conn", func(ctx context.Context, span trace.Span) error {
-		return c.initConn(ctx, cn)
-	})
-	if err != nil {
+	if err := c.initConn(ctx, cn); err != nil {
 		c.connPool.Remove(ctx, cn, err)
-		if err := internal.Unwrap(err); err != nil {
+		if err := errors.Unwrap(err); err != nil {
 			return nil, err
 		}
 		return nil, err
@@ -289,7 +261,7 @@
 		c.opt.Limiter.ReportResult(err)
 	}
 
-	if isBadConn(err, false) {
+	if isBadConn(err, false, c.opt.Addr) {
 		c.connPool.Remove(ctx, cn, err)
 	} else {
 		c.connPool.Put(ctx, cn)
@@ -299,25 +271,36 @@
 func (c *baseClient) withConn(
 	ctx context.Context, fn func(context.Context, *pool.Conn) error,
 ) error {
-	return internal.WithSpan(ctx, "redis.with_conn", func(ctx context.Context, span trace.Span) error {
-		cn, err := c.getConn(ctx)
-		if err != nil {
-			return err
-		}
+	cn, err := c.getConn(ctx)
+	if err != nil {
+		return err
+	}
 
-		if span.IsRecording() {
-			if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
-				span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
-			}
-		}
+	defer func() {
+		c.releaseConn(ctx, cn, err)
+	}()
 
-		defer func() {
-			c.releaseConn(ctx, cn, err)
-		}()
+	done := ctx.Done() //nolint:ifshort
 
+	if done == nil {
 		err = fn(ctx, cn)
 		return err
-	})
+	}
+
+	errc := make(chan error, 1)
+	go func() { errc <- fn(ctx, cn) }()
+
+	select {
+	case <-done:
+		_ = cn.Close()
+		// Wait for the goroutine to finish and send something.
+		<-errc
+
+		err = ctx.Err()
+		return err
+	case err = <-errc:
+		return err
+	}
 }
 
 func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
@@ -325,45 +308,50 @@
 	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
 		attempt := attempt
 
-		var retry bool
-		err := internal.WithSpan(ctx, "redis.process", func(ctx context.Context, span trace.Span) error {
-			if attempt > 0 {
-				if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
-					return err
-				}
-			}
-
-			retryTimeout := true
-			err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-				err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
-					return writeCmd(wr, cmd)
-				})
-				if err != nil {
-					return err
-				}
-
-				err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
-				if err != nil {
-					retryTimeout = cmd.readTimeout() == nil
-					return err
-				}
-
-				return nil
-			})
-			if err == nil {
-				return nil
-			}
-			retry = shouldRetry(err, retryTimeout)
-			return err
-		})
+		retry, err := c._process(ctx, cmd, attempt)
 		if err == nil || !retry {
 			return err
 		}
+
 		lastErr = err
 	}
 	return lastErr
 }
 
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+	if attempt > 0 {
+		if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+			return false, err
+		}
+	}
+
+	retryTimeout := uint32(1)
+	err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+		err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+			return writeCmd(wr, cmd)
+		})
+		if err != nil {
+			return err
+		}
+
+		err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+		if err != nil {
+			if cmd.readTimeout() == nil {
+				atomic.StoreUint32(&retryTimeout, 1)
+			}
+			return err
+		}
+
+		return nil
+	})
+	if err == nil {
+		return false, nil
+	}
+
+	retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+	return retry, err
+}
+
 func (c *baseClient) retryBackoff(attempt int) time.Duration {
 	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
 }
@@ -722,7 +710,9 @@
 	hooks // TODO: inherit hooks
 }
 
-// Conn is like Client, but its pool contains single connection.
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
 type Conn struct {
 	*conn
 	ctx context.Context
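A minimal sketch, under the assumption that the reworked withConn above returns ctx.Err() and closes the connection once the context is done, of bounding a single command with a context deadline; the address and key are hypothetical:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // hypothetical address
	defer client.Close()

	// A short deadline bounds how long a single command may block on the connection.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	err := client.Get(ctx, "some-key").Err() // hypothetical key
	if errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("command aborted by context deadline")
	}
}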
diff --git a/vendor/github.com/go-redis/redis/v8/renovate.json b/vendor/github.com/go-redis/redis/v8/renovate.json
deleted file mode 100644
index f45d8f1..0000000
--- a/vendor/github.com/go-redis/redis/v8/renovate.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "extends": [
-    "config:base"
-  ]
-}
diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
index 34d05f3..4df00fc 100644
--- a/vendor/github.com/go-redis/redis/v8/ring.go
+++ b/vendor/github.com/go-redis/redis/v8/ring.go
@@ -12,7 +12,8 @@
 	"time"
 
 	"github.com/cespare/xxhash/v2"
-	"github.com/dgryski/go-rendezvous"
+	rendezvous "github.com/dgryski/go-rendezvous" //nolint
+
 	"github.com/go-redis/redis/v8/internal"
 	"github.com/go-redis/redis/v8/internal/hashtag"
 	"github.com/go-redis/redis/v8/internal/pool"
@@ -78,6 +79,9 @@
 	ReadTimeout  time.Duration
 	WriteTimeout time.Duration
 
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
 	PoolSize           int
 	MinIdleConns       int
 	MaxConnAge         time.Duration
@@ -138,6 +142,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		MinIdleConns:       opt.MinIdleConns,
 		MaxConnAge:         opt.MaxConnAge,
@@ -571,7 +576,7 @@
 	}
 	info := cmdsInfo[name]
 	if info == nil {
-		internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+		internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
 	}
 	return info
 }
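For reference, a brief sketch of the PoolFIFO option now plumbed into RingOptions above; the shard addresses and key are hypothetical:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	// PoolFIFO switches each shard's connection pool to FIFO GET/PUT (default is LIFO).
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "localhost:6379",
			"shard2": "localhost:6380",
		},
		PoolFIFO: true,
	})
	defer ring.Close()

	_ = ring.Set(ctx, "key", "value", 0).Err()
}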
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
index 07ed482..5cab18d 100644
--- a/vendor/github.com/go-redis/redis/v8/script.go
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -8,7 +8,7 @@
 	"strings"
 )
 
-type scripter interface {
+type Scripter interface {
 	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
 	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
 	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
@@ -16,9 +16,9 @@
 }
 
 var (
-	_ scripter = (*Client)(nil)
-	_ scripter = (*Ring)(nil)
-	_ scripter = (*ClusterClient)(nil)
+	_ Scripter = (*Client)(nil)
+	_ Scripter = (*Ring)(nil)
+	_ Scripter = (*ClusterClient)(nil)
 )
 
 type Script struct {
@@ -38,25 +38,25 @@
 	return s.hash
 }
 
-func (s *Script) Load(ctx context.Context, c scripter) *StringCmd {
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
 	return c.ScriptLoad(ctx, s.src)
 }
 
-func (s *Script) Exists(ctx context.Context, c scripter) *BoolSliceCmd {
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
 	return c.ScriptExists(ctx, s.hash)
 }
 
-func (s *Script) Eval(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
 	return c.Eval(ctx, s.src, keys, args...)
 }
 
-func (s *Script) EvalSha(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
 	return c.EvalSha(ctx, s.hash, keys, args...)
 }
 
 // Run optimistically uses EVALSHA to run the script. If script does not exist
 // it is retried using EVAL.
-func (s *Script) Run(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
 	r := s.EvalSha(ctx, c, keys, args...)
 	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
 		return s.Eval(ctx, c, keys, args...)
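Since scripter is now exported as Scripter, helper functions can accept any of Client, Ring or ClusterClient. A minimal sketch; the Lua script, key and address are hypothetical:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// incrBy is a hypothetical Lua script; Run tries EVALSHA first and falls back to EVAL.
var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func increment(ctx context.Context, c redis.Scripter, key string, n int) (int64, error) {
	return incrBy.Run(ctx, c, []string{key}, n).Int64()
}

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // hypothetical address
	defer client.Close()

	v, err := increment(ctx, client, "hits", 2)
	if err != nil {
		panic(err)
	}
	fmt.Println(v)
}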
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
index 7db9843..ec6221d 100644
--- a/vendor/github.com/go-redis/redis/v8/sentinel.go
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -23,7 +23,13 @@
 	MasterName string
 	// A seed list of host:port addresses of sentinel nodes.
 	SentinelAddrs []string
-	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel configuration
+
+	// If specified with SentinelPassword, enables ACL-based authentication (via
+	// AUTH <user> <pass>).
+	SentinelUsername string
+	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+	// configuration, or, if SentinelUsername is also supplied, used for ACL-based
+	// authentication.
 	SentinelPassword string
 
 	// Allows routing read-only commands to the closest master or slave node.
@@ -36,6 +42,10 @@
 	// Route all commands to slave read-only nodes.
 	SlaveOnly bool
 
+	// Use slaves that are disconnected from the master when no connected slaves are found.
+	// Currently, this option only works in the RandomSlaveAddr function.
+	UseDisconnectedSlaves bool
+
 	// Following options are copied from Options struct.
 
 	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
@@ -53,6 +63,9 @@
 	ReadTimeout  time.Duration
 	WriteTimeout time.Duration
 
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
 	PoolSize           int
 	MinIdleConns       int
 	MaxConnAge         time.Duration
@@ -82,6 +95,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		PoolTimeout:        opt.PoolTimeout,
 		IdleTimeout:        opt.IdleTimeout,
@@ -101,6 +115,7 @@
 		OnConnect: opt.OnConnect,
 
 		DB:       0,
+		Username: opt.SentinelUsername,
 		Password: opt.SentinelPassword,
 
 		MaxRetries:      opt.MaxRetries,
@@ -111,6 +126,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		PoolTimeout:        opt.PoolTimeout,
 		IdleTimeout:        opt.IdleTimeout,
@@ -142,6 +158,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		PoolTimeout:        opt.PoolTimeout,
 		IdleTimeout:        opt.IdleTimeout,
@@ -167,6 +184,10 @@
 	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
 	copy(sentinelAddrs, failoverOpt.SentinelAddrs)
 
+	rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+		sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+	})
+
 	failover := &sentinelFailover{
 		opt:           failoverOpt,
 		sentinelAddrs: sentinelAddrs,
@@ -177,11 +198,14 @@
 	opt.init()
 
 	connPool := newConnPool(opt)
+
+	failover.mu.Lock()
 	failover.onFailover = func(ctx context.Context, addr string) {
 		_ = connPool.Filter(func(cn *pool.Conn) bool {
 			return cn.RemoteAddr().String() != addr
 		})
 	}
+	failover.mu.Unlock()
 
 	c := Client{
 		baseClient: newBaseClient(opt, connPool),
@@ -208,14 +232,21 @@
 				failover.trySwitchMaster(ctx, addr)
 			}
 		}
-
 		if err != nil {
 			return nil, err
 		}
 		if failover.opt.Dialer != nil {
 			return failover.opt.Dialer(ctx, network, addr)
 		}
-		return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
+
+		netDialer := &net.Dialer{
+			Timeout:   failover.opt.DialTimeout,
+			KeepAlive: 5 * time.Minute,
+		}
+		if failover.opt.TLSConfig == nil {
+			return netDialer.DialContext(ctx, network, addr)
+		}
+		return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
 	}
 }
 
@@ -430,10 +461,22 @@
 }
 
 func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
-	addresses, err := c.slaveAddrs(ctx)
+	if c.opt == nil {
+		return "", errors.New("opt is nil")
+	}
+
+	addresses, err := c.slaveAddrs(ctx, false)
 	if err != nil {
 		return "", err
 	}
+
+	if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
+		addresses, err = c.slaveAddrs(ctx, true)
+		if err != nil {
+			return "", err
+		}
+	}
+
 	if len(addresses) == 0 {
 		return c.MasterAddr(ctx)
 	}
@@ -482,10 +525,10 @@
 		return addr, nil
 	}
 
-	return "", errors.New("redis: all sentinels are unreachable")
+	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
 }
 
-func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
+func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
 	c.mu.RLock()
 	sentinel := c.sentinel
 	c.mu.RUnlock()
@@ -508,6 +551,8 @@
 		_ = c.closeSentinel()
 	}
 
+	var sentinelReachable bool
+
 	for i, sentinelAddr := range c.sentinelAddrs {
 		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
 
@@ -518,16 +563,22 @@
 			_ = sentinel.Close()
 			continue
 		}
-
+		sentinelReachable = true
+		addrs := parseSlaveAddrs(slaves, useDisconnected)
+		if len(addrs) == 0 {
+			continue
+		}
 		// Push working sentinel to the top.
 		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
 		c.setSentinel(ctx, sentinel)
 
-		addrs := parseSlaveAddrs(slaves)
 		return addrs, nil
 	}
 
-	return []string{}, errors.New("redis: all sentinels are unreachable")
+	if sentinelReachable {
+		return []string{}, nil
+	}
+	return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
 }
 
 func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
@@ -547,12 +598,11 @@
 			c.opt.MasterName, err)
 		return []string{}
 	}
-	return parseSlaveAddrs(addrs)
+	return parseSlaveAddrs(addrs, false)
 }
 
-func parseSlaveAddrs(addrs []interface{}) []string {
+func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
 	nodes := make([]string, 0, len(addrs))
-
 	for _, node := range addrs {
 		ip := ""
 		port := ""
@@ -574,8 +624,12 @@
 
 		for _, flag := range flags {
 			switch flag {
-			case "s_down", "o_down", "disconnected":
+			case "s_down", "o_down":
 				isDown = true
+			case "disconnected":
+				if !keepDisconnected {
+					isDown = true
+				}
 			}
 		}
 
@@ -589,7 +643,7 @@
 
 func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
 	c.mu.RLock()
-	currentAddr := c._masterAddr
+	currentAddr := c._masterAddr //nolint:ifshort
 	c.mu.RUnlock()
 
 	if addr == currentAddr {
@@ -630,15 +684,22 @@
 	}
 	for _, sentinel := range sentinels {
 		vals := sentinel.([]interface{})
+		var ip, port string
 		for i := 0; i < len(vals); i += 2 {
 			key := vals[i].(string)
-			if key == "name" {
-				sentinelAddr := vals[i+1].(string)
-				if !contains(c.sentinelAddrs, sentinelAddr) {
-					internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
-						sentinelAddr, c.opt.MasterName)
-					c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
-				}
+			switch key {
+			case "ip":
+				ip = vals[i+1].(string)
+			case "port":
+				port = vals[i+1].(string)
+			}
+		}
+		if ip != "" && port != "" {
+			sentinelAddr := net.JoinHostPort(ip, port)
+			if !contains(c.sentinelAddrs, sentinelAddr) {
+				internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+					sentinelAddr, c.opt.MasterName)
+				c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
 			}
 		}
 	}
@@ -646,6 +707,7 @@
 
 func (c *sentinelFailover) listen(pubsub *PubSub) {
 	ctx := context.TODO()
+
 	if c.onUpdate != nil {
 		c.onUpdate(ctx)
 	}
@@ -701,7 +763,7 @@
 			Addr: masterAddr,
 		}}
 
-		slaveAddrs, err := failover.slaveAddrs(ctx)
+		slaveAddrs, err := failover.slaveAddrs(ctx, false)
 		if err != nil {
 			return nil, err
 		}
@@ -723,9 +785,12 @@
 	}
 
 	c := NewClusterClient(opt)
+
+	failover.mu.Lock()
 	failover.onUpdate = func(ctx context.Context) {
 		c.ReloadState(ctx)
 	}
+	failover.mu.Unlock()
 
 	return c
 }
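A minimal sketch of the new FailoverOptions fields introduced above (SentinelUsername, UseDisconnectedSlaves, PoolFIFO); master name, sentinel addresses and credentials are hypothetical placeholders:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"sentinel1:26379", "sentinel2:26379", "sentinel3:26379"},

		// ACL-based auth against Sentinel, and fallback to disconnected
		// replicas when no connected ones are reported.
		SentinelUsername:      "sentinel-user",
		SentinelPassword:      "sentinel-pass",
		UseDisconnectedSlaves: true,

		PoolFIFO: true,
	})
	defer client.Close()

	_ = client.Ping(ctx).Err()
}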
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
index ad825c6..8c9d872 100644
--- a/vendor/github.com/go-redis/redis/v8/tx.go
+++ b/vendor/github.com/go-redis/redis/v8/tx.go
@@ -13,7 +13,8 @@
 // Tx implements Redis transactions as described in
 // http://redis.io/topics/transactions. It's NOT safe for concurrent use
 // by multiple goroutines, because Exec resets list of watched keys.
-// If you don't need WATCH it is better to use Pipeline.
+//
+// If you don't need WATCH, use Pipeline instead.
 type Tx struct {
 	baseClient
 	cmdable
@@ -65,16 +66,13 @@
 // The transaction is automatically closed when fn exits.
 func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
 	tx := c.newTx(ctx)
+	defer tx.Close(ctx)
 	if len(keys) > 0 {
 		if err := tx.Watch(ctx, keys...).Err(); err != nil {
-			_ = tx.Close(ctx)
 			return err
 		}
 	}
-
-	err := fn(tx)
-	_ = tx.Close(ctx)
-	return err
+	return fn(tx)
 }
 
 // Close closes the transaction, releasing any open resources.
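Since Watch now closes the transaction via defer, the callback can simply return. A minimal optimistic-locking sketch; the address and key are hypothetical:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // hypothetical address
	defer client.Close()

	err := client.Watch(ctx, func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, "counter").Int()
		if err != nil && err != redis.Nil {
			return err
		}
		// MULTI/EXEC runs only if "counter" was not modified while watched.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, "counter", n+1, 0)
			return nil
		})
		return err
	}, "counter")
	if err != nil {
		panic(err)
	}
}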
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
index 5f0e1e3..c89b3e5 100644
--- a/vendor/github.com/go-redis/redis/v8/universal.go
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -25,6 +25,7 @@
 
 	Username         string
 	Password         string
+	SentinelUsername string
 	SentinelPassword string
 
 	MaxRetries      int
@@ -35,6 +36,9 @@
 	ReadTimeout  time.Duration
 	WriteTimeout time.Duration
 
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
 	PoolSize           int
 	MinIdleConns       int
 	MaxConnAge         time.Duration
@@ -53,6 +57,7 @@
 
 	// The sentinel master name.
 	// Only failover clients.
+
 	MasterName string
 }
 
@@ -82,6 +87,7 @@
 		DialTimeout:        o.DialTimeout,
 		ReadTimeout:        o.ReadTimeout,
 		WriteTimeout:       o.WriteTimeout,
+		PoolFIFO:           o.PoolFIFO,
 		PoolSize:           o.PoolSize,
 		MinIdleConns:       o.MinIdleConns,
 		MaxConnAge:         o.MaxConnAge,
@@ -109,6 +115,7 @@
 		DB:               o.DB,
 		Username:         o.Username,
 		Password:         o.Password,
+		SentinelUsername: o.SentinelUsername,
 		SentinelPassword: o.SentinelPassword,
 
 		MaxRetries:      o.MaxRetries,
@@ -119,6 +126,7 @@
 		ReadTimeout:  o.ReadTimeout,
 		WriteTimeout: o.WriteTimeout,
 
+		PoolFIFO:           o.PoolFIFO,
 		PoolSize:           o.PoolSize,
 		MinIdleConns:       o.MinIdleConns,
 		MaxConnAge:         o.MaxConnAge,
@@ -154,6 +162,7 @@
 		ReadTimeout:  o.ReadTimeout,
 		WriteTimeout: o.WriteTimeout,
 
+		PoolFIFO:           o.PoolFIFO,
 		PoolSize:           o.PoolSize,
 		MinIdleConns:       o.MinIdleConns,
 		MaxConnAge:         o.MaxConnAge,
@@ -168,9 +177,9 @@
 // --------------------------------------------------------------------
 
 // UniversalClient is an abstract client which - based on the provided options -
-// can connect to either clusters, or sentinel-backed failover instances
-// or simple single-instance servers. This can be useful for testing
-// cluster-specific applications locally.
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally or for having
+// different clients in different environments.
 type UniversalClient interface {
 	Cmdable
 	Context() context.Context
@@ -190,12 +199,12 @@
 	_ UniversalClient = (*Ring)(nil)
 )
 
-// NewUniversalClient returns a new multi client. The type of client returned depends
-// on the following three conditions:
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
 //
-// 1. if a MasterName is passed a sentinel-backed FailoverClient will be returned
-// 2. if the number of Addrs is two or more, a ClusterClient will be returned
-// 3. otherwise, a single-node redis Client will be returned.
+// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Client is returned.
 func NewUniversalClient(opts *UniversalOptions) UniversalClient {
 	if opts.MasterName != "" {
 		return NewFailoverClient(opts.Failover())
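A minimal sketch of NewUniversalClient following the selection rules documented above; with one hypothetical address and no MasterName, a single-node Client is returned:

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	// Setting MasterName here would yield a FailoverClient; two or more Addrs a ClusterClient.
	client := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs:    []string{"localhost:6379"},
		PoolFIFO: true,
	})
	defer client.Close()

	_ = client.Ping(ctx).Err()
}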
diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go
new file mode 100644
index 0000000..112c9a2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/version.go
@@ -0,0 +1,6 @@
+package redis
+
+// Version is the current release version.
+func Version() string {
+	return "8.11.5"
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
deleted file mode 100644
index a5a1386..0000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
-
-package descriptor
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/descriptor.proto.
-
-type Edition = descriptorpb.Edition
-
-const Edition_EDITION_UNKNOWN = descriptorpb.Edition_EDITION_UNKNOWN
-const Edition_EDITION_PROTO2 = descriptorpb.Edition_EDITION_PROTO2
-const Edition_EDITION_PROTO3 = descriptorpb.Edition_EDITION_PROTO3
-const Edition_EDITION_2023 = descriptorpb.Edition_EDITION_2023
-const Edition_EDITION_2024 = descriptorpb.Edition_EDITION_2024
-const Edition_EDITION_1_TEST_ONLY = descriptorpb.Edition_EDITION_1_TEST_ONLY
-const Edition_EDITION_2_TEST_ONLY = descriptorpb.Edition_EDITION_2_TEST_ONLY
-const Edition_EDITION_99997_TEST_ONLY = descriptorpb.Edition_EDITION_99997_TEST_ONLY
-const Edition_EDITION_99998_TEST_ONLY = descriptorpb.Edition_EDITION_99998_TEST_ONLY
-const Edition_EDITION_99999_TEST_ONLY = descriptorpb.Edition_EDITION_99999_TEST_ONLY
-const Edition_EDITION_MAX = descriptorpb.Edition_EDITION_MAX
-
-var Edition_name = descriptorpb.Edition_name
-var Edition_value = descriptorpb.Edition_value
-
-type ExtensionRangeOptions_VerificationState = descriptorpb.ExtensionRangeOptions_VerificationState
-
-const ExtensionRangeOptions_DECLARATION = descriptorpb.ExtensionRangeOptions_DECLARATION
-const ExtensionRangeOptions_UNVERIFIED = descriptorpb.ExtensionRangeOptions_UNVERIFIED
-
-var ExtensionRangeOptions_VerificationState_name = descriptorpb.ExtensionRangeOptions_VerificationState_name
-var ExtensionRangeOptions_VerificationState_value = descriptorpb.ExtensionRangeOptions_VerificationState_value
-
-type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
-
-const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
-const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
-const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
-const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
-const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
-const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
-const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
-const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
-const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
-const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
-const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
-const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
-const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
-const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
-const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
-const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
-const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
-const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
-
-var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
-var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
-
-type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
-
-const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
-const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
-const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
-
-var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
-var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
-
-type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
-
-const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
-const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
-const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
-
-var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
-var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
-
-type FieldOptions_CType = descriptorpb.FieldOptions_CType
-
-const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
-const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
-const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
-
-var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
-var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
-
-type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
-
-const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
-const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
-const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
-
-var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
-var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
-
-type FieldOptions_OptionRetention = descriptorpb.FieldOptions_OptionRetention
-
-const FieldOptions_RETENTION_UNKNOWN = descriptorpb.FieldOptions_RETENTION_UNKNOWN
-const FieldOptions_RETENTION_RUNTIME = descriptorpb.FieldOptions_RETENTION_RUNTIME
-const FieldOptions_RETENTION_SOURCE = descriptorpb.FieldOptions_RETENTION_SOURCE
-
-var FieldOptions_OptionRetention_name = descriptorpb.FieldOptions_OptionRetention_name
-var FieldOptions_OptionRetention_value = descriptorpb.FieldOptions_OptionRetention_value
-
-type FieldOptions_OptionTargetType = descriptorpb.FieldOptions_OptionTargetType
-
-const FieldOptions_TARGET_TYPE_UNKNOWN = descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN
-const FieldOptions_TARGET_TYPE_FILE = descriptorpb.FieldOptions_TARGET_TYPE_FILE
-const FieldOptions_TARGET_TYPE_EXTENSION_RANGE = descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE
-const FieldOptions_TARGET_TYPE_MESSAGE = descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE
-const FieldOptions_TARGET_TYPE_FIELD = descriptorpb.FieldOptions_TARGET_TYPE_FIELD
-const FieldOptions_TARGET_TYPE_ONEOF = descriptorpb.FieldOptions_TARGET_TYPE_ONEOF
-const FieldOptions_TARGET_TYPE_ENUM = descriptorpb.FieldOptions_TARGET_TYPE_ENUM
-const FieldOptions_TARGET_TYPE_ENUM_ENTRY = descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY
-const FieldOptions_TARGET_TYPE_SERVICE = descriptorpb.FieldOptions_TARGET_TYPE_SERVICE
-const FieldOptions_TARGET_TYPE_METHOD = descriptorpb.FieldOptions_TARGET_TYPE_METHOD
-
-var FieldOptions_OptionTargetType_name = descriptorpb.FieldOptions_OptionTargetType_name
-var FieldOptions_OptionTargetType_value = descriptorpb.FieldOptions_OptionTargetType_value
-
-type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
-
-const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
-const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
-const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
-
-var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
-var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
-
-type FeatureSet_FieldPresence = descriptorpb.FeatureSet_FieldPresence
-
-const FeatureSet_FIELD_PRESENCE_UNKNOWN = descriptorpb.FeatureSet_FIELD_PRESENCE_UNKNOWN
-const FeatureSet_EXPLICIT = descriptorpb.FeatureSet_EXPLICIT
-const FeatureSet_IMPLICIT = descriptorpb.FeatureSet_IMPLICIT
-const FeatureSet_LEGACY_REQUIRED = descriptorpb.FeatureSet_LEGACY_REQUIRED
-
-var FeatureSet_FieldPresence_name = descriptorpb.FeatureSet_FieldPresence_name
-var FeatureSet_FieldPresence_value = descriptorpb.FeatureSet_FieldPresence_value
-
-type FeatureSet_EnumType = descriptorpb.FeatureSet_EnumType
-
-const FeatureSet_ENUM_TYPE_UNKNOWN = descriptorpb.FeatureSet_ENUM_TYPE_UNKNOWN
-const FeatureSet_OPEN = descriptorpb.FeatureSet_OPEN
-const FeatureSet_CLOSED = descriptorpb.FeatureSet_CLOSED
-
-var FeatureSet_EnumType_name = descriptorpb.FeatureSet_EnumType_name
-var FeatureSet_EnumType_value = descriptorpb.FeatureSet_EnumType_value
-
-type FeatureSet_RepeatedFieldEncoding = descriptorpb.FeatureSet_RepeatedFieldEncoding
-
-const FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN = descriptorpb.FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN
-const FeatureSet_PACKED = descriptorpb.FeatureSet_PACKED
-const FeatureSet_EXPANDED = descriptorpb.FeatureSet_EXPANDED
-
-var FeatureSet_RepeatedFieldEncoding_name = descriptorpb.FeatureSet_RepeatedFieldEncoding_name
-var FeatureSet_RepeatedFieldEncoding_value = descriptorpb.FeatureSet_RepeatedFieldEncoding_value
-
-type FeatureSet_Utf8Validation = descriptorpb.FeatureSet_Utf8Validation
-
-const FeatureSet_UTF8_VALIDATION_UNKNOWN = descriptorpb.FeatureSet_UTF8_VALIDATION_UNKNOWN
-const FeatureSet_VERIFY = descriptorpb.FeatureSet_VERIFY
-const FeatureSet_NONE = descriptorpb.FeatureSet_NONE
-
-var FeatureSet_Utf8Validation_name = descriptorpb.FeatureSet_Utf8Validation_name
-var FeatureSet_Utf8Validation_value = descriptorpb.FeatureSet_Utf8Validation_value
-
-type FeatureSet_MessageEncoding = descriptorpb.FeatureSet_MessageEncoding
-
-const FeatureSet_MESSAGE_ENCODING_UNKNOWN = descriptorpb.FeatureSet_MESSAGE_ENCODING_UNKNOWN
-const FeatureSet_LENGTH_PREFIXED = descriptorpb.FeatureSet_LENGTH_PREFIXED
-const FeatureSet_DELIMITED = descriptorpb.FeatureSet_DELIMITED
-
-var FeatureSet_MessageEncoding_name = descriptorpb.FeatureSet_MessageEncoding_name
-var FeatureSet_MessageEncoding_value = descriptorpb.FeatureSet_MessageEncoding_value
-
-type FeatureSet_JsonFormat = descriptorpb.FeatureSet_JsonFormat
-
-const FeatureSet_JSON_FORMAT_UNKNOWN = descriptorpb.FeatureSet_JSON_FORMAT_UNKNOWN
-const FeatureSet_ALLOW = descriptorpb.FeatureSet_ALLOW
-const FeatureSet_LEGACY_BEST_EFFORT = descriptorpb.FeatureSet_LEGACY_BEST_EFFORT
-
-var FeatureSet_JsonFormat_name = descriptorpb.FeatureSet_JsonFormat_name
-var FeatureSet_JsonFormat_value = descriptorpb.FeatureSet_JsonFormat_value
-
-type GeneratedCodeInfo_Annotation_Semantic = descriptorpb.GeneratedCodeInfo_Annotation_Semantic
-
-const GeneratedCodeInfo_Annotation_NONE = descriptorpb.GeneratedCodeInfo_Annotation_NONE
-const GeneratedCodeInfo_Annotation_SET = descriptorpb.GeneratedCodeInfo_Annotation_SET
-const GeneratedCodeInfo_Annotation_ALIAS = descriptorpb.GeneratedCodeInfo_Annotation_ALIAS
-
-var GeneratedCodeInfo_Annotation_Semantic_name = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_name
-var GeneratedCodeInfo_Annotation_Semantic_value = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_value
-
-type FileDescriptorSet = descriptorpb.FileDescriptorSet
-type FileDescriptorProto = descriptorpb.FileDescriptorProto
-type DescriptorProto = descriptorpb.DescriptorProto
-type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
-
-const Default_ExtensionRangeOptions_Verification = descriptorpb.Default_ExtensionRangeOptions_Verification
-
-type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
-type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
-type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
-type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
-type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
-type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
-
-const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
-const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
-
-type FileOptions = descriptorpb.FileOptions
-
-const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
-const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
-const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
-const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
-const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
-const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
-const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
-const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
-
-type MessageOptions = descriptorpb.MessageOptions
-
-const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
-const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
-const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
-
-type FieldOptions = descriptorpb.FieldOptions
-
-const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
-const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
-const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
-const Default_FieldOptions_UnverifiedLazy = descriptorpb.Default_FieldOptions_UnverifiedLazy
-const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
-const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
-const Default_FieldOptions_DebugRedact = descriptorpb.Default_FieldOptions_DebugRedact
-
-type OneofOptions = descriptorpb.OneofOptions
-type EnumOptions = descriptorpb.EnumOptions
-
-const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
-
-type EnumValueOptions = descriptorpb.EnumValueOptions
-
-const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
-const Default_EnumValueOptions_DebugRedact = descriptorpb.Default_EnumValueOptions_DebugRedact
-
-type ServiceOptions = descriptorpb.ServiceOptions
-
-const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
-
-type MethodOptions = descriptorpb.MethodOptions
-
-const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
-const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
-
-type UninterpretedOption = descriptorpb.UninterpretedOption
-type FeatureSet = descriptorpb.FeatureSet
-type FeatureSetDefaults = descriptorpb.FeatureSetDefaults
-type SourceCodeInfo = descriptorpb.SourceCodeInfo
-type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
-type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
-type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
-type ExtensionRangeOptions_Declaration = descriptorpb.ExtensionRangeOptions_Declaration
-type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
-type FieldOptions_EditionDefault = descriptorpb.FieldOptions_EditionDefault
-type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
-type FeatureSetDefaults_FeatureSetEditionDefault = descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault
-type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
-type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
-
-var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
-	0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
-	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
-	0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
-	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x32,
-}
-
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
-func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
-	if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
-	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
-	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
-	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
deleted file mode 100644
index fdff3fd..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/golang/protobuf/proto"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
-
-	anypb "github.com/golang/protobuf/ptypes/any"
-)
-
-const urlPrefix = "type.googleapis.com/"
-
-// AnyMessageName returns the message name contained in an anypb.Any message.
-// Most type assertions should use the Is function instead.
-//
-// Deprecated: Call the any.MessageName method instead.
-func AnyMessageName(any *anypb.Any) (string, error) {
-	name, err := anyMessageName(any)
-	return string(name), err
-}
-func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
-	if any == nil {
-		return "", fmt.Errorf("message is nil")
-	}
-	name := protoreflect.FullName(any.TypeUrl)
-	if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
-		name = name[i+len("/"):]
-	}
-	if !name.IsValid() {
-		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
-	}
-	return name, nil
-}
-
-// MarshalAny marshals the given message m into an anypb.Any message.
-//
-// Deprecated: Call the anypb.New function instead.
-func MarshalAny(m proto.Message) (*anypb.Any, error) {
-	switch dm := m.(type) {
-	case DynamicAny:
-		m = dm.Message
-	case *DynamicAny:
-		if dm == nil {
-			return nil, proto.ErrNil
-		}
-		m = dm.Message
-	}
-	b, err := proto.Marshal(m)
-	if err != nil {
-		return nil, err
-	}
-	return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
-}
-
-// Empty returns a new message of the type specified in an anypb.Any message.
-// It returns protoregistry.NotFound if the corresponding message type could not
-// be resolved in the global registry.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
-// to resolve the message name and create a new instance of it.
-func Empty(any *anypb.Any) (proto.Message, error) {
-	name, err := anyMessageName(any)
-	if err != nil {
-		return nil, err
-	}
-	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
-	if err != nil {
-		return nil, err
-	}
-	return proto.MessageV1(mt.New().Interface()), nil
-}
-
-// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
-// into the provided message m. It returns an error if the target message
-// does not match the type in the Any message or if an unmarshal error occurs.
-//
-// The target message m may be a *DynamicAny message. If the underlying message
-// type could not be resolved, then this returns protoregistry.NotFound.
-//
-// Deprecated: Call the any.UnmarshalTo method instead.
-func UnmarshalAny(any *anypb.Any, m proto.Message) error {
-	if dm, ok := m.(*DynamicAny); ok {
-		if dm.Message == nil {
-			var err error
-			dm.Message, err = Empty(any)
-			if err != nil {
-				return err
-			}
-		}
-		m = dm.Message
-	}
-
-	anyName, err := AnyMessageName(any)
-	if err != nil {
-		return err
-	}
-	msgName := proto.MessageName(m)
-	if anyName != msgName {
-		return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
-	}
-	return proto.Unmarshal(any.Value, m)
-}
-
-// Is reports whether the Any message contains a message of the specified type.
-//
-// Deprecated: Call the any.MessageIs method instead.
-func Is(any *anypb.Any, m proto.Message) bool {
-	if any == nil || m == nil {
-		return false
-	}
-	name := proto.MessageName(m)
-	if !strings.HasSuffix(any.TypeUrl, name) {
-		return false
-	}
-	return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in an anypb.Any message.
-// The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-//	var x ptypes.DynamicAny
-//	if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-//	fmt.Printf("unmarshaled message: %v", x.Message)
-//
-// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
-// the any message contents into a new instance of the underlying message.
-type DynamicAny struct{ proto.Message }
-
-func (m DynamicAny) String() string {
-	if m.Message == nil {
-		return "<nil>"
-	}
-	return m.Message.String()
-}
-func (m DynamicAny) Reset() {
-	if m.Message == nil {
-		return
-	}
-	m.Message.Reset()
-}
-func (m DynamicAny) ProtoMessage() {
-	return
-}
-func (m DynamicAny) ProtoReflect() protoreflect.Message {
-	if m.Message == nil {
-		return nil
-	}
-	return dynamicAny{proto.MessageReflect(m.Message)}
-}
-
-type dynamicAny struct{ protoreflect.Message }
-
-func (m dynamicAny) Type() protoreflect.MessageType {
-	return dynamicAnyType{m.Message.Type()}
-}
-func (m dynamicAny) New() protoreflect.Message {
-	return dynamicAnyType{m.Message.Type()}.New()
-}
-func (m dynamicAny) Interface() protoreflect.ProtoMessage {
-	return DynamicAny{proto.MessageV1(m.Message.Interface())}
-}
-
-type dynamicAnyType struct{ protoreflect.MessageType }
-
-func (t dynamicAnyType) New() protoreflect.Message {
-	return dynamicAny{t.MessageType.New()}
-}
-func (t dynamicAnyType) Zero() protoreflect.Message {
-	return dynamicAny{t.MessageType.Zero()}
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
deleted file mode 100644
index d3c3325..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ptypes provides functionality for interacting with well-known types.
-//
-// Deprecated: Well-known types have specialized functionality directly
-// injected into the generated packages for each message type.
-// See the deprecation notice for each function for the suggested alternative.
-package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
deleted file mode 100644
index b2b55dd..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	durationpb "github.com/golang/protobuf/ptypes/duration"
-)
-
-// Range of google.protobuf.Duration as specified in duration.proto.
-// This is about 10,000 years in seconds.
-const (
-	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
-	minSeconds = -maxSeconds
-)
-
-// Duration converts a durationpb.Duration to a time.Duration.
-// Duration returns an error if dur is invalid or overflows a time.Duration.
-//
-// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
-func Duration(dur *durationpb.Duration) (time.Duration, error) {
-	if err := validateDuration(dur); err != nil {
-		return 0, err
-	}
-	d := time.Duration(dur.Seconds) * time.Second
-	if int64(d/time.Second) != dur.Seconds {
-		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
-	}
-	if dur.Nanos != 0 {
-		d += time.Duration(dur.Nanos) * time.Nanosecond
-		if (d < 0) != (dur.Nanos < 0) {
-			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
-		}
-	}
-	return d, nil
-}
-
-// DurationProto converts a time.Duration to a durationpb.Duration.
-//
-// Deprecated: Call the durationpb.New function instead.
-func DurationProto(d time.Duration) *durationpb.Duration {
-	nanos := d.Nanoseconds()
-	secs := nanos / 1e9
-	nanos -= secs * 1e9
-	return &durationpb.Duration{
-		Seconds: int64(secs),
-		Nanos:   int32(nanos),
-	}
-}
-
-// validateDuration determines whether the durationpb.Duration is valid
-// according to the definition in google/protobuf/duration.proto.
-// A valid durpb.Duration may still be too large to fit into a time.Duration
-// Note that the range of durationpb.Duration is about 10,000 years,
-// while the range of time.Duration is about 290 years.
-func validateDuration(dur *durationpb.Duration) error {
-	if dur == nil {
-		return errors.New("duration: nil Duration")
-	}
-	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
-		return fmt.Errorf("duration: %v: seconds out of range", dur)
-	}
-	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
-		return fmt.Errorf("duration: %v: nanos out of range", dur)
-	}
-	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
-	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
-		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
-	}
-	return nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
deleted file mode 100644
index d0079ee..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/duration/duration.proto
-
-package duration
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	durationpb "google.golang.org/protobuf/types/known/durationpb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/duration.proto.
-
-type Duration = durationpb.Duration
-
-var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
-	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
-	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
-func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
deleted file mode 100644
index 8d82abe..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/struct/struct.proto
-
-package structpb
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	structpb "google.golang.org/protobuf/types/known/structpb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/struct.proto.
-
-type NullValue = structpb.NullValue
-
-const NullValue_NULL_VALUE = structpb.NullValue_NULL_VALUE
-
-var NullValue_name = structpb.NullValue_name
-var NullValue_value = structpb.NullValue_value
-
-type Struct = structpb.Struct
-type Value = structpb.Value
-type Value_NullValue = structpb.Value_NullValue
-type Value_NumberValue = structpb.Value_NumberValue
-type Value_StringValue = structpb.Value_StringValue
-type Value_BoolValue = structpb.Value_BoolValue
-type Value_StructValue = structpb.Value_StructValue
-type Value_ListValue = structpb.Value_ListValue
-type ListValue = structpb.ListValue
-
-var File_github_com_golang_protobuf_ptypes_struct_struct_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = []byte{
-	0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
-	0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
-	0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
-	0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() }
-func file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_struct_struct_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_struct_struct_proto = out.File
-	file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
deleted file mode 100644
index 8368a3f..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-// Range of google.protobuf.Duration as specified in timestamp.proto.
-const (
-	// Seconds field of the earliest valid Timestamp.
-	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	minValidSeconds = -62135596800
-	// Seconds field just after the latest valid Timestamp.
-	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	maxValidSeconds = 253402300800
-)
-
-// Timestamp converts a timestamppb.Timestamp to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return
-// value is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-//
-// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
-func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-//
-// Deprecated: Call the timestamppb.Now function instead.
-func TimestampNow() *timestamppb.Timestamp {
-	ts, err := TimestampProto(time.Now())
-	if err != nil {
-		panic("ptypes: time.Now() out of Timestamp range")
-	}
-	return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-//
-// Deprecated: Call the timestamppb.New function instead.
-func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
-	ts := &timestamppb.Timestamp{
-		Seconds: t.Unix(),
-		Nanos:   int32(t.Nanosecond()),
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps.
-// For invalid Timestamps, it returns an error message in parentheses.
-//
-// Deprecated: Call the ts.AsTime method instead,
-// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string {
-	t, err := Timestamp(ts)
-	if err != nil {
-		return fmt.Sprintf("(%v)", err)
-	}
-	return t.Format(time.RFC3339Nano)
-}
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
-// and has a Nanos field in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes the problem.
-//
-// Every valid Timestamp can be represented by a time.Time,
-// but the converse is not true.
-func validateTimestamp(ts *timestamppb.Timestamp) error {
-	if ts == nil {
-		return errors.New("timestamp: nil Timestamp")
-	}
-	if ts.Seconds < minValidSeconds {
-		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
-	}
-	if ts.Seconds >= maxValidSeconds {
-		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
-	}
-	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
-		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
-	}
-	return nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
deleted file mode 100644
index cc40f27..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
-
-package wrappers
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/wrappers.proto.
-
-type DoubleValue = wrapperspb.DoubleValue
-type FloatValue = wrapperspb.FloatValue
-type Int64Value = wrapperspb.Int64Value
-type UInt64Value = wrapperspb.UInt64Value
-type Int32Value = wrapperspb.Int32Value
-type UInt32Value = wrapperspb.UInt32Value
-type BoolValue = wrapperspb.BoolValue
-type StringValue = wrapperspb.StringValue
-type BytesValue = wrapperspb.BytesValue
-
-var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
-	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
-	0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
-	0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
-	0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
-	0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
-func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
-	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
-}
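
The deprecated github.com/golang/protobuf/ptypes helpers removed above each name their replacement in google.golang.org/protobuf/types/known (anypb, durationpb, timestamppb, wrapperspb). A minimal migration sketch under that assumption — the values used here are illustrative only; Any follows the same pattern via anypb.New and UnmarshalTo:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Replaces ptypes.TimestampProto / ptypes.Timestamp / ptypes.TimestampString.
	ts := timestamppb.New(time.Now())
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339Nano))

	// Replaces ptypes.DurationProto / ptypes.Duration.
	d := durationpb.New(90 * time.Second)
	fmt.Println(d.AsDuration())
}
```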
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a6..0000000
--- a/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
-  - 1.4.3
-  - 1.5.3
-  - tip
-
-script:
-  - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 0000000..7ec5ac7
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
index 04fdf09..a502fdc 100644
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -2,6 +2,22 @@
 
 We definitely welcome patches and contribution to this project!
 
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
 ### Legal requirements
 
 In order to protect both you and ourselves, you will need to sign the
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index f765a46..3e9a618 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -1,6 +1,6 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
 The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
 and DCE 1.1: Authentication and Security Services. 
 
 This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@
 change is the ability to represent an invalid UUID (vs a NIL UUID).
 
 ###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
 
 ###### Documentation 
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
 
 Full `go doc` style documentation for the package can be viewed online without
 installing this package by using the GoDoc site here: 
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
index b404f4b..dc60082 100644
--- a/vendor/github.com/google/uuid/hash.go
+++ b/vendor/github.com/google/uuid/hash.go
@@ -17,6 +17,12 @@
 	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
 	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
 	Nil           UUID // empty UUID, all zeros
+
+	// The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+	Max = UUID{
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	}
 )
 
 // NewHash returns a new UUID derived from the hash of space concatenated with
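
A quick sketch of the new Max constant next to the existing Nil, assuming the vendored module is imported as github.com/google/uuid:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	fmt.Println(uuid.Nil) // 00000000-0000-0000-0000-000000000000
	fmt.Println(uuid.Max) // ffffffff-ffff-ffff-ffff-ffffffffffff
}
```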
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
index 24b78ed..b2a0bc8 100644
--- a/vendor/github.com/google/uuid/node_js.go
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -7,6 +7,6 @@
 package uuid
 
 // getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
 // Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
 func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index e6ef06c..c351129 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@
 }
 
 // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid.  The time is only defined for version 1 and 2 UUIDs.
+// uuid.  The time is only defined for version 1, 2, 6 and 7 UUIDs.
 func (uuid UUID) Time() Time {
-	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
-	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
-	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
-	return Time(time)
+	var t Time
+	switch uuid.Version() {
+	case 6:
+		time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+		t = Time(time)
+	case 7:
+		time := binary.BigEndian.Uint64(uuid[:8])
+		t = Time((time>>16)*10000 + g1582ns100)
+	default: // forward compatible
+		time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+		time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+		time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+		t = Time(time)
+	}
+	return t
 }
 
 // ClockSequence returns the clock sequence encoded in uuid.
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index a57207a..5232b48 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -56,11 +56,15 @@
 	return ok
 }
 
-// Parse decodes s into a UUID or returns an error.  Both the standard UUID
-// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
-// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
-// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+// Parse decodes s into a UUID or returns an error if it cannot be parsed.  Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded.  In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g.  {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}.  Only the middle 36 bytes are
+// examined in the latter case.  Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
 func Parse(s string) (UUID, error) {
 	var uuid UUID
 	switch len(s) {
@@ -69,7 +73,7 @@
 
 	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 	case 36 + 9:
-		if strings.ToLower(s[:9]) != "urn:uuid:" {
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
 			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
 		}
 		s = s[9:]
@@ -101,7 +105,8 @@
 		9, 11,
 		14, 16,
 		19, 21,
-		24, 26, 28, 30, 32, 34} {
+		24, 26, 28, 30, 32, 34,
+	} {
 		v, ok := xtob(s[x], s[x+1])
 		if !ok {
 			return uuid, errors.New("invalid UUID format")
@@ -117,7 +122,7 @@
 	switch len(b) {
 	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+		if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
 			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
 		}
 		b = b[9:]
@@ -145,7 +150,8 @@
 		9, 11,
 		14, 16,
 		19, 21,
-		24, 26, 28, 30, 32, 34} {
+		24, 26, 28, 30, 32, 34,
+	} {
 		v, ok := xtob(b[x], b[x+1])
 		if !ok {
 			return uuid, errors.New("invalid UUID format")
@@ -180,6 +186,59 @@
 	return uuid
 }
 
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+//   xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+//   {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+	switch len(s) {
+	// Standard UUID format
+	case 36:
+
+	// UUID with "urn:uuid:" prefix
+	case 36 + 9:
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
+			return fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// UUID enclosed in braces
+	case 36 + 2:
+		if s[0] != '{' || s[len(s)-1] != '}' {
+			return fmt.Errorf("invalid bracketed UUID format")
+		}
+		s = s[1 : len(s)-1]
+
+	// UUID without hyphens
+	case 32:
+		for i := 0; i < len(s); i += 2 {
+			_, ok := xtob(s[i], s[i+1])
+			if !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+
+	default:
+		return invalidLengthError{len(s)}
+	}
+
+	// Check for standard UUID format
+	if len(s) == 36 {
+		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+			return errors.New("invalid UUID format")
+		}
+		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+			if _, ok := xtob(s[x], s[x+1]); !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+	}
+
+	return nil
+}
+
 // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 // , or "" if uuid is invalid.
 func (uuid UUID) String() string {
@@ -292,3 +351,15 @@
 	poolMu.Lock()
 	poolPos = randPoolSize
 }
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+	var uuidStrs = make([]string, len(uuids))
+	for i, uuid := range uuids {
+		uuidStrs[i] = uuid.String()
+	}
+	return uuidStrs
+}
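
The new Validate helper checks a string without allocating a UUID, and UUIDs.Strings converts a slice in one call; a small usage sketch, again assuming github.com/google/uuid:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Validate only reports whether the string is well formed; it does not return the parsed value.
	if err := uuid.Validate("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		fmt.Println("not a UUID:", err)
		return
	}

	// UUIDs.Strings renders every element with String().
	ids := uuid.UUIDs{uuid.New(), uuid.New()}
	fmt.Println(ids.Strings())
}
```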
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 sets the NodeID to random bits automatically. If the clock sequence has not
+// been set by SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewV6 returns Nil and an error.
+func NewV6() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           time_high                           |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |           time_mid            |      time_low_and_version     |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                         node (2-5)                            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	uuid[6] = 0x60 | (uuid[6] & 0x0F)
+	uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
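
A usage sketch for NewV6; as the comment above describes, the node ID falls back to random bits when no hardware interface is available:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewV6()
	if err != nil {
		fmt.Println("NewV6 failed:", err)
		return
	}
	fmt.Println(u, "version:", u.Version())
}
```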
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 0000000..3167b64
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
+// It also offers improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+	uuid, err := NewRandom()
+	if err != nil {
+		return uuid, err
+	}
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+	uuid, err := NewRandomFromReader(r)
+	if err != nil {
+		return uuid, err
+	}
+
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time field (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]).
+// uuid[8] already has the right variant bits set (variant is 10).
+// See the NewV7 and NewV7FromReader functions.
+func makeV7(uuid []byte) {
+	/*
+		 0                   1                   2                   3
+		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                           unix_ts_ms                          |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|          unix_ts_ms           |  ver  |  rand_a (12 bit seq)  |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|var|                        rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                            rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+	_ = uuid[15] // bounds check
+
+	t, s := getV7Time()
+
+	uuid[0] = byte(t >> 40)
+	uuid[1] = byte(t >> 32)
+	uuid[2] = byte(t >> 24)
+	uuid[3] = byte(t >> 16)
+	uuid[4] = byte(t >> 8)
+	uuid[5] = byte(t)
+
+	uuid[6] = 0x70 | (0x0F & byte(s>>8))
+	uuid[7] = byte(s)
+}
+
+// lastV7time is the last time value we returned, stored as:
+//
+//	52 bits of time in milliseconds since epoch
+//	12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+	timeMu.Lock()
+	defer timeMu.Unlock()
+
+	nano := timeNow().UnixNano()
+	milli = nano / nanoPerMilli
+	// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+	seq = (nano - milli*nanoPerMilli) >> 8
+	now := milli<<12 + seq
+	if now <= lastV7time {
+		now = lastV7time + 1
+		milli = now >> 12
+		seq = now & 0xfff
+	}
+	lastV7time = now
+	return milli, seq
+}
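
Because getV7Time enforces monotonicity, two v7 UUIDs created back-to-back compare in creation order when their string forms are compared; a small sketch, again assuming github.com/google/uuid:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()

	// The 48-bit millisecond prefix plus the 12-bit sequence keep v7 IDs sortable.
	fmt.Println(a.String() < b.String()) // true, b was created after a

	// Time() now understands version 7 and converts the Unix-millisecond prefix.
	sec, nsec := a.Time().UnixTime()
	fmt.Println("created at unix seconds:", sec, nsec)
}
```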
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
deleted file mode 100644
index fc198d8..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-sudo: false
-language: go
-go:
-  - 1.13.x
-  - 1.14.x
-  - 1.15.x
-
-env:
-  global:
-    - GO111MODULE=on
-
-script:
- - make test
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
deleted file mode 100644
index 6eeb7e2..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
-
-Types of changes:
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities.
-
-## [Unreleased]
-
-### Added
-
-- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f)
-
-## [v1.1.0] - 2019-09-12
-### Added
-- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules.
-- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2  - [kush-patel-hs](https://github.com/kush-patel-hs)
-- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao)
-- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad)
-- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd)
-- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok)
-- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled
-
-### Deprecated
-- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42)
-- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of <godoc.org>.
-
-### Fixed
-- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst)
-- Numerious documentation fixes.
-
-## v1.0.0 - 2018-05-08
-### Added
-- grpc_auth 
-- grpc_ctxtags
-- grpc_zap
-- grpc_logrus
-- grpc_opentracing
-- grpc_retry
-- grpc_validator
-- grpc_recovery
-
-[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD 
-[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0 
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
index 814e155..a12b409 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
@@ -7,16 +7,23 @@
 [![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
 [![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
 [![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
-[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)
+[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://gophers.slack.com/archives/CNJL30P4P)
 
 [gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
 
+## ⚠️  Status
+
+Version [v2](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2) is about to be released, along with a migration guide, and will replace v1. Try v2 and give us feedback!
+
+Version v1 is currently in deprecation mode, which means only critical and safety bug fixes will be merged.
+
+
 ## Middleware
 
 [gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
-Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) 
+Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
 that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement
-common patterns: auth, logging, message, validation, retries or monitoring.
+common patterns: auth, logging, message, validation, retries, or monitoring.
 
 These are generic building blocks that make it easy to build multiple microservices.
 The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
@@ -29,57 +36,57 @@
 
 myServer := grpc.NewServer(
     grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
-        grpc_recovery.StreamServerInterceptor(),
         grpc_ctxtags.StreamServerInterceptor(),
         grpc_opentracing.StreamServerInterceptor(),
         grpc_prometheus.StreamServerInterceptor,
         grpc_zap.StreamServerInterceptor(zapLogger),
         grpc_auth.StreamServerInterceptor(myAuthFunction),
+        grpc_recovery.StreamServerInterceptor(),
     )),
     grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
-        grpc_recovery.UnaryServerInterceptor(),
         grpc_ctxtags.UnaryServerInterceptor(),
         grpc_opentracing.UnaryServerInterceptor(),
         grpc_prometheus.UnaryServerInterceptor,
         grpc_zap.UnaryServerInterceptor(zapLogger),
         grpc_auth.UnaryServerInterceptor(myAuthFunction),
+        grpc_recovery.UnaryServerInterceptor(),
     )),
 )
 ```
 
 ## Interceptors
 
-*Please send a PR to add new interceptors or middleware to this list*
+_Please send a PR to add new interceptors or middleware to this list_
 
 #### Auth
-   * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware 
+
+- [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
 
 #### Logging
-   * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
-   * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
-   * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
-   * [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
-   * [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows to replace loggers in runtime (thread-safe).
+
+- [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
+- [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
+- [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
+- [`grpc_kit`](logging/kit/) - integration of [go-kit/log](https://github.com/go-kit/log) logging library into gRPC handlers.
+- [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows replacing loggers at runtime (thread-safe).
 
 #### Monitoring
-   * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
-   * [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
-   * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
+
+- [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
+- [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
+- [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
+- [`otelgrpc`](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation/google.golang.org/grpc/otelgrpc) - [OpenTelemetry](https://opentelemetry.io/) client-side and server-side interceptors
 
 #### Client
-   * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
+
+- [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
 
 #### Server
-   * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
-   * [`grpc_recovery`](recovery/) - turn panics into gRPC errors
-   * [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
 
+- [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
+- [`grpc_recovery`](recovery/) - turn panics into gRPC errors
+- [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
 
-## Status
-
-This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io).
-
-Additional tooling will be added, and contributions are welcome.
 
 ## License
 
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
index ea3738b..407d933 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
@@ -16,22 +16,41 @@
 // Execution is done in left-to-right order, including passing of context.
 // For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
 // will see context changes of one and two.
+//
+// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainUnaryInterceptor directly.
 func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
 	n := len(interceptors)
 
+	// Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	if n == 0 {
+		return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+			return handler(ctx, req)
+		}
+	}
+
+	// The degenerate case, just return the single wrapped interceptor directly.
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// Return a function which satisfies the interceptor interface, and which is
+	// a closure over the given list of interceptors to be chained.
 	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-		chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
-			return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
-				return currentInter(currentCtx, currentReq, info, currentHandler)
+		currHandler := handler
+		// Iterate backwards through all interceptors except the first (outermost).
+		// Wrap each one in a function which satisfies the handler interface, but
+		// is also a closure over the `info` and `handler` parameters. Then pass
+		// each pseudo-handler to the next outer interceptor as the handler to be called.
+		for i := n - 1; i > 0; i-- {
+			// Rebind to loop-local vars so they can be closed over.
+			innerHandler, i := currHandler, i
+			currHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
+				return interceptors[i](currentCtx, currentReq, info, innerHandler)
 			}
 		}
-
-		chainedHandler := handler
-		for i := n - 1; i >= 0; i-- {
-			chainedHandler = chainer(interceptors[i], chainedHandler)
-		}
-
-		return chainedHandler(ctx, req)
+		// Finally return the result of calling the outermost interceptor with the
+		// outermost pseudo-handler created above as its handler.
+		return interceptors[0](ctx, req, info, currHandler)
 	}
 }
 
@@ -40,22 +59,31 @@
 // Execution is done in left-to-right order, including passing of context.
 // For example ChainUnaryServer(one, two, three) will execute one before two before three.
 // If you want to pass context between interceptors, use WrapServerStream.
+//
+// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainStreamInterceptor directly.
 func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
 	n := len(interceptors)
 
-	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-		chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
-			return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
-				return currentInter(currentSrv, currentStream, info, currentHandler)
+	// Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	if n == 0 {
+		return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+			return handler(srv, stream)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		currHandler := handler
+		for i := n - 1; i > 0; i-- {
+			innerHandler, i := currHandler, i
+			currHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
+				return interceptors[i](currentSrv, currentStream, info, innerHandler)
 			}
 		}
-
-		chainedHandler := handler
-		for i := n - 1; i >= 0; i-- {
-			chainedHandler = chainer(interceptors[i], chainedHandler)
-		}
-
-		return chainedHandler(srv, ss)
+		return interceptors[0](srv, stream, info, currHandler)
 	}
 }
 
@@ -66,19 +94,26 @@
 func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
 	n := len(interceptors)
 
+	// Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	if n == 0 {
+		return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+			return invoker(ctx, method, req, reply, cc, opts...)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
 	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-		chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
-			return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
-				return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
+		currInvoker := invoker
+		for i := n - 1; i > 0; i-- {
+			innerInvoker, i := currInvoker, i
+			currInvoker = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+				return interceptors[i](currentCtx, currentMethod, currentReq, currentRepl, currentConn, innerInvoker, currentOpts...)
 			}
 		}
-
-		chainedInvoker := invoker
-		for i := n - 1; i >= 0; i-- {
-			chainedInvoker = chainer(interceptors[i], chainedInvoker)
-		}
-
-		return chainedInvoker(ctx, method, req, reply, cc, opts...)
+		return interceptors[0](ctx, method, req, reply, cc, currInvoker, opts...)
 	}
 }
 
@@ -89,19 +124,26 @@
 func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
 	n := len(interceptors)
 
+	// Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	if n == 0 {
+		return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+			return streamer(ctx, desc, cc, method, opts...)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
 	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-		chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
-			return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
-				return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
+		currStreamer := streamer
+		for i := n - 1; i > 0; i-- {
+			innerStreamer, i := currStreamer, i
+			currStreamer = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
+				return interceptors[i](currentCtx, currentDesc, currentConn, currentMethod, innerStreamer, currentOpts...)
 			}
 		}
-
-		chainedStreamer := streamer
-		for i := n - 1; i >= 0; i-- {
-			chainedStreamer = chainer(interceptors[i], chainedStreamer)
-		}
-
-		return chainedStreamer(ctx, desc, cc, method, opts...)
+		return interceptors[0](ctx, desc, cc, method, currStreamer, opts...)
 	}
 }
 
@@ -109,12 +151,16 @@
 //
 // WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
 // Basically syntactic sugar.
+//
+// Deprecated: use google.golang.org/grpc.ChainUnaryInterceptor instead.
 func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
-	return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
+	return grpc.ChainUnaryInterceptor(interceptors...)
 }
 
 // WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
 // Basically syntactic sugar.
+//
+// Deprecated: use google.golang.org/grpc.ChainStreamInterceptor instead.
 func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
-	return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
+	return grpc.ChainStreamInterceptor(interceptors...)
 }
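
Since both helpers now simply delegate to grpc-go's built-in chaining options, a minimal migration sketch looks like the following; `logUnary`, `authUnary` and `logStream` are hypothetical interceptors, and the snippet targets `google.golang.org/grpc` directly rather than this vendored package:

```go
package server

import "google.golang.org/grpc"

// newServer wires the same interceptor chains without the deprecated
// grpc_middleware.WithUnaryServerChain / WithStreamServerChain helpers.
func newServer(logUnary, authUnary grpc.UnaryServerInterceptor, logStream grpc.StreamServerInterceptor) *grpc.Server {
	return grpc.NewServer(
		grpc.ChainUnaryInterceptor(logUnary, authUnary),
		grpc.ChainStreamInterceptor(logStream),
	)
}
```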
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
index afd924a..f8ba719 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
@@ -18,7 +18,7 @@
 linear backoff with 10% jitter.
 
 For chained interceptors, the retry interceptor will call every interceptor that follows it
-whenever when a retry happens.
+whenever a retry happens.
 
 Please see examples for more advanced use.
 */
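
For context, a minimal client-side configuration sketch for this retry interceptor; the backoff values, the per-attempt timeout and the plaintext credentials are illustrative assumptions only:

```go
package client

import (
	"time"

	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dial retries failed calls up to 3 times, waiting 100ms between attempts and
// giving each attempt its own 1s timeout.
func dial(target string) (*grpc.ClientConn, error) {
	retryOpts := []grpc_retry.CallOption{
		grpc_retry.WithMax(3),
		grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)),
		grpc_retry.WithPerRetryTimeout(time.Second),
	}
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()), // plaintext, for illustration only
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(retryOpts...)),
		grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)),
	)
}
```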
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
index 62d8312..003bbd9 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
@@ -5,8 +5,8 @@
 
 import (
 	"context"
-	"fmt"
 	"io"
+	"strconv"
 	"sync"
 	"time"
 
@@ -136,7 +136,6 @@
 type serverStreamingRetryingStream struct {
 	grpc.ClientStream
 	bufferedSends []interface{} // single message that the client can send
-	receivedGood  bool          // indicates whether any prior receives were successful
 	wasClosedSend bool          // indicates that CloseSend was called
 	parentCtx     context.Context
 	callOpts      *options
@@ -209,17 +208,8 @@
 }
 
 func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
-	s.mu.RLock()
-	wasGood := s.receivedGood
-	s.mu.RUnlock()
 	err := s.getStream().RecvMsg(m)
 	if err == nil || err == io.EOF {
-		s.mu.Lock()
-		s.receivedGood = true
-		s.mu.Unlock()
-		return false, err
-	} else if wasGood {
-		// previous RecvMsg in the stream succeeded, no retry logic should interfere
 		return false, err
 	}
 	if isContextError(err) {
@@ -303,7 +293,7 @@
 		ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout)
 	}
 	if attempt > 0 && callOpts.includeHeader {
-		mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt))
+		mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10))
 		ctx = mdClone.ToOutgoing(ctx)
 	}
 	return ctx
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
index 1c60585..15225d7 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
@@ -10,7 +10,7 @@
 	"google.golang.org/grpc/metadata"
 )
 
-// NiceMD is a convenience wrapper definiting extra functions on the metadata.
+// NiceMD is a convenience wrapper defining extra functions on the metadata.
 type NiceMD metadata.MD
 
 // ExtractIncoming extracts an inbound metadata from the server-side context.
@@ -39,7 +39,7 @@
 
 // Clone performs a *deep* copy of the metadata.MD.
 //
-// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted
+// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed
 // all keys get copied.
 func (m NiceMD) Clone(copiedKeys ...string) NiceMD {
 	newMd := NiceMD(metadata.Pairs())
@@ -61,7 +61,7 @@
 		newMd[k] = make([]string, len(vv))
 		copy(newMd[k], vv)
 	}
-	return NiceMD(newMd)
+	return newMd
 }
 
 // ToOutgoing sets the given NiceMD as a client-side context for dispatching.
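
As a usage note, a small sketch of how this wrapper typically composes on the client side; the header names and the helper itself are illustrative assumptions, not part of the vendored file:

```go
package client

import (
	"context"

	"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
)

// withRequestID deep-copies only the allow-listed "authorization" key from the
// outgoing metadata, adds a request ID header, and attaches the result back to
// the context for dispatching.
func withRequestID(ctx context.Context, id string) context.Context {
	md := metautils.ExtractOutgoing(ctx).Clone("authorization")
	md.Set("x-request-id", id)
	return md.ToOutgoing(ctx)
}
```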
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
new file mode 100644
index 0000000..2233cff
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
@@ -0,0 +1,201 @@
+#vendor
+vendor/
+
+# Created by .ignore support plugin (hsz.mobi)
+coverage.txt
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+### Windows template
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+### Kate template
+# Swap Files #
+.*.kate-swp
+.swp.*
+### SublimeText template
+# cache files for sublime text
+*.tmlanguage.cache
+*.tmPreferences.cache
+*.stTheme.cache
+
+# workspace files are user-specific
+*.sublime-workspace
+
+# project files should be checked into the repository, unless a significant
+# proportion of contributors will probably not be using SublimeText
+# *.sublime-project
+
+# sftp configuration file
+sftp-config.json
+### Linux template
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea
+.idea/tasks.xml
+.idea/dictionaries
+.idea/vcs.xml
+.idea/jsLibraryMappings.xml
+
+# Sensitive or high-churn files:
+.idea/dataSources.ids
+.idea/dataSources.xml
+.idea/dataSources.local.xml
+.idea/sqlDataSources.xml
+.idea/dynamic.xml
+.idea/uiDesigner.xml
+
+# Gradle:
+.idea/gradle.xml
+.idea/libraries
+
+# Mongo Explorer plugin:
+.idea/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+### Xcode template
+# Xcode
+#
+# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
+
+## Build generated
+build/
+DerivedData/
+
+## Various settings
+*.pbxuser
+!default.pbxuser
+*.mode1v3
+!default.mode1v3
+*.mode2v3
+!default.mode2v3
+*.perspectivev3
+!default.perspectivev3
+xcuserdata/
+
+## Other
+*.moved-aside
+*.xccheckout
+*.xcscmblueprint
+### Eclipse template
+
+.metadata
+bin/
+tmp/
+*.tmp
+*.bak
+*.swp
+*~.nib
+local.properties
+.settings/
+.loadpath
+.recommenders
+
+# Eclipse Core
+.project
+
+# External tool builders
+.externalToolBuilders/
+
+# Locally stored "Eclipse launch configurations"
+*.launch
+
+# PyDev specific (Python IDE for Eclipse)
+*.pydevproject
+
+# CDT-specific (C/C++ Development Tooling)
+.cproject
+
+# JDT-specific (Eclipse Java Development Tools)
+.classpath
+
+# Java annotation processor (APT)
+.factorypath
+
+# PDT-specific (PHP Development Tools)
+.buildpath
+
+# sbteclipse plugin
+.target
+
+# Tern plugin
+.tern-project
+
+# TeXlipse plugin
+.texlipse
+
+# STS (Spring Tool Suite)
+.springBeans
+
+# Code Recommenders
+.recommenders/
+
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
new file mode 100644
index 0000000..2a845b9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
@@ -0,0 +1,25 @@
+sudo: false
+language: go
+# * github.com/grpc/grpc-go still supports go1.6
+#   - When we drop support for go1.6 we can remove golang.org/x/net/context
+#     below as it is part of the Go std library since go1.7
+# * github.com/prometheus/client_golang already requires at least go1.7 since
+#   September 2017
+go:
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - master
+
+install:
+  - go get github.com/prometheus/client_golang/prometheus
+  - go get google.golang.org/grpc
+  - go get golang.org/x/net/context
+  - go get github.com/stretchr/testify
+script:
+ - make test 
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md
new file mode 100644
index 0000000..19a8059
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md
@@ -0,0 +1,24 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04
+
+### Added
+
+* Provide metrics object as `prometheus.Collector`, for conventional metric registration.
+* Support non-default/global Prometheus registry.
+* Allow configuring counters with `prometheus.CounterOpts`.
+
+### Changed
+
+* Remove usage of deprecated `grpc.Code()`.
+* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`.
+
+---
+
+This changelog was started with version `v1.2.0`; for earlier versions, refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases).
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
new file mode 100644
index 0000000..b2b0650
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
@@ -0,0 +1,201 @@
+                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
new file mode 100644
index 0000000..499c583
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
@@ -0,0 +1,247 @@
+# Go gRPC Interceptors for Prometheus monitoring 
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
+[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
+
+A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
+
+## Interceptors
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
+by a gRPC Server before the request is passed on to the user's application logic. It is a perfect way to implement
+common patterns: auth, logging and... monitoring.
+
+To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
+
+## Usage
+
+There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
+
+### Server-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+    // Initialize your gRPC server's interceptor.
+    myServer := grpc.NewServer(
+        grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+        grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+    )
+    // Register your gRPC service implementations.
+    myservice.RegisterMyServiceServer(s.server, &myServiceImpl{})
+    // After all your registrations, make sure all of the Prometheus metrics are initialized.
+    grpc_prometheus.Register(myServer)
+    // Register Prometheus metrics handler.    
+    http.Handle("/metrics", promhttp.Handler())
+...
+```
+
+### Client-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+   clientConn, err = grpc.Dial(
+       address,
+       grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+       grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+   )
+   client = pb_testproto.NewTestServiceClient(clientConn)
+   resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+...
+```
+
+# Metrics
+
+## Labels
+
+All server-side metrics start with `grpc_server` as the Prometheus subsystem name, and all client-side metrics start with `grpc_client`. The two are mirror concepts, and all methods
+carry the same rich labels:
+  
+  * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and
+    the `service` section name. E.g. for `package = mwitkow.testproto` and 
+     `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
+  * `grpc_method` - the name of the method called on the gRPC service. E.g.  
+    `grpc_method="Ping"`
+  * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
+    Differentiating between them is especially important for latency measurements.
+
+     - `unary` is a single-request, single-response RPC
+     - `client_stream` is a multi-request, single-response RPC
+     - `server_stream` is a single-request, multi-response RPC
+     - `bidi_stream` is a multi-request, multi-response RPC
+    
+
+Additionally for completed RPCs, the following labels are used:
+
+  * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
+    The list of all statuses is too long, but here are some common ones:
+
+      - `OK` - means the RPC was successful
+      - `InvalidArgument` - the RPC contained bad values
+      - `Internal` - server-side error not disclosed to the clients
+      
+## Counters
+
+The counters and their up-to-date documentation are in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go),
+and their values are exposed by the respective Prometheus handler (usually `/metrics`).
+
+For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones contain mirror concepts.
+
+For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
+calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
+
+First, immediately after the server receives the call it will increment the
+`grpc_server_started_total` and start the handling time clock (if histograms are enabled). 
+
+```jsoniq
+grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+Then the user logic gets invoked. It receives one message from the client containing the request 
+(it's a `server_stream`):
+
+```jsoniq
+grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+The user logic may return an error, or send multiple messages back to the client. In this case, on 
+each of the 20 messages sent back, a counter will be incremented:
+
+```jsoniq
+grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
+```
+
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go)) 
+and the relevant call labels increment the `grpc_server_handled_total` counter.
+
+```jsoniq
+grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+## Histograms
+
+[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
+the latency monitoring metrics are disabled by default. To enable them, please call the following
+in your server initialization code:
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram()
+```
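+
+If the default buckets are too coarse for your latency profile, they can be overridden with a
+`HistogramOption`; the bucket boundaries below are illustrative only:
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram(
+    grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.5, 1, 5}),
+)
+```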
+
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:
+
+ * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method 
+ * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for 
+   calculating average handling times
+ * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
+   handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
+
+The counter values will look as follows:
+
+```jsoniq
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
+grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+
+## Useful query examples
+
+The Prometheus philosophy is to provide raw metrics to the monitoring system, and
+let the aggregations be handled there. The verbosity of the above metrics makes that
+flexibility possible. Here are a couple of useful monitoring queries:
+
+
+### request inbound rate
+```jsoniq
+sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
+```
+For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
+rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
+how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
+
+### unary request error rate
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+```
+For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the 
+ones that didn't finish with `OK` code.
+
+### unary request error percentage
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+ / 
+sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
+ * 100.0
+```
+For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that
+this is a combination of the two above examples. This is an example of a query you would like to
+[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
+"no more than 1% requests should fail".
+
+### average response stream size
+```jsoniq
+sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+```
+For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all `
+server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows 
+you to track when clients started to send "wide" queries that ret
+Note the divisor is the number of started RPCs, in order to account for in-flight requests.
+
+### 99%-tile latency of unary requests
+```jsoniq
+histogram_quantile(0.99, 
+  sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
+)
+```
+For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
+of the handling time of RPCs per service. Please note the `5m` rate, this means that the quantile
+estimation will take samples in a rolling `5m` window. When combined with other quantiles
+(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system 
+(e.g. impact of caching).
+
+### percentage of slow unary queries (>250ms)
+```jsoniq
+100.0 - (
+sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
+ / 
+sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
+) * 100.0
+```
+For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25` 
+seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal)
+buckets, meaning that counting "fast" requests fractions is easier. However, simple maths helps.
+This is an example of a query you would like to alert on in your system for SLA violations, 
+e.g. "less than 1% of requests are slower than 250ms".
+
+
+## Status
+
+This code has been used since August 2015 as the basis for monitoring *production* gRPC microservices at [Improbable](https://improbable.io).
+
+## License
+
+`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
new file mode 100644
index 0000000..751a4c7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
@@ -0,0 +1,39 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for client-side gRPC.
+
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// DefaultClientMetrics is the default instance of ClientMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+	// registry.
+	DefaultClientMetrics = NewClientMetrics()
+
+	// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()
+
+	// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
+	prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of
+// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
+// query. This function acts on the DefaultClientMetrics variable and the
+// default Prometheus metrics registry.
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
+	prom.Register(DefaultClientMetrics.clientHandledHistogram)
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
new file mode 100644
index 0000000..9b476f9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
@@ -0,0 +1,170 @@
+package grpc_prometheus
+
+import (
+	"io"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// ClientMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC client.
+type ClientMetrics struct {
+	clientStartedCounter          *prom.CounterVec
+	clientHandledCounter          *prom.CounterVec
+	clientStreamMsgReceived       *prom.CounterVec
+	clientStreamMsgSent           *prom.CounterVec
+	clientHandledHistogramEnabled bool
+	clientHandledHistogramOpts    prom.HistogramOpts
+	clientHandledHistogram        *prom.HistogramVec
+}
+
+// NewClientMetrics returns a ClientMetrics object. Use a new instance of
+// ClientMetrics when not using the default Prometheus metrics registry, for
+// example when wanting to control which metrics are added to a registry as
+// opposed to automatically adding metrics via init functions.
+func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
+	opts := counterOptions(counterOpts)
+	return &ClientMetrics{
+		clientStartedCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_started_total",
+				Help: "Total number of RPCs started on the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientHandledCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_handled_total",
+				Help: "Total number of RPCs completed by the client, regardless of success or failure.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+
+		clientStreamMsgReceived: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_msg_received_total",
+				Help: "Total number of RPC stream messages received by the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientStreamMsgSent: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_msg_sent_total",
+				Help: "Total number of gRPC stream messages sent by the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientHandledHistogramEnabled: false,
+		clientHandledHistogramOpts: prom.HistogramOpts{
+			Name:    "grpc_client_handling_seconds",
+			Help:    "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
+			Buckets: prom.DefBuckets,
+		},
+		clientHandledHistogram: nil,
+	}
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
+	m.clientStartedCounter.Describe(ch)
+	m.clientHandledCounter.Describe(ch)
+	m.clientStreamMsgReceived.Describe(ch)
+	m.clientStreamMsgSent.Describe(ch)
+	if m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram.Describe(ch)
+	}
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
+	m.clientStartedCounter.Collect(ch)
+	m.clientHandledCounter.Collect(ch)
+	m.clientStreamMsgReceived.Collect(ch)
+	m.clientStreamMsgSent.Collect(ch)
+	if m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram.Collect(ch)
+	}
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&m.clientHandledHistogramOpts)
+	}
+	if !m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram = prom.NewHistogramVec(
+			m.clientHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+	}
+	m.clientHandledHistogramEnabled = true
+}
+
+// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		monitor := newClientReporter(m, Unary, method)
+		monitor.SentMessage()
+		err := invoker(ctx, method, req, reply, cc, opts...)
+		if err != nil {
+			monitor.ReceivedMessage()
+		}
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		return err
+	}
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		monitor := newClientReporter(m, clientStreamType(desc), method)
+		clientStream, err := streamer(ctx, desc, cc, method, opts...)
+		if err != nil {
+			st, _ := status.FromError(err)
+			monitor.Handled(st.Code())
+			return nil, err
+		}
+		return &monitoredClientStream{clientStream, monitor}, nil
+	}
+}
+
+func clientStreamType(desc *grpc.StreamDesc) grpcType {
+	if desc.ClientStreams && !desc.ServerStreams {
+		return ClientStream
+	} else if !desc.ClientStreams && desc.ServerStreams {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
+type monitoredClientStream struct {
+	grpc.ClientStream
+	monitor *clientReporter
+}
+
+func (s *monitoredClientStream) SendMsg(m interface{}) error {
+	err := s.ClientStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredClientStream) RecvMsg(m interface{}) error {
+	err := s.ClientStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	} else if err == io.EOF {
+		s.monitor.Handled(codes.OK)
+	} else {
+		st, _ := status.FromError(err)
+		s.monitor.Handled(st.Code())
+	}
+	return err
+}
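
To make the non-default-registry support concrete, here is a minimal client-side sketch; `addr`, the registry wiring and the plaintext credentials are assumptions for illustration, not part of the vendored file:

```go
package client

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialWithMetrics registers a dedicated ClientMetrics instance (a
// prometheus.Collector, via the Describe/Collect methods above) on a
// caller-supplied registry and installs its interceptors on the connection.
func dialWithMetrics(addr string, reg *prometheus.Registry) (*grpc.ClientConn, error) {
	clientMetrics := grpc_prometheus.NewClientMetrics()
	reg.MustRegister(clientMetrics)
	return grpc.Dial(addr,
		grpc.WithTransportCredentials(insecure.NewCredentials()), // plaintext, for illustration only
		grpc.WithUnaryInterceptor(clientMetrics.UnaryClientInterceptor()),
		grpc.WithStreamInterceptor(clientMetrics.StreamClientInterceptor()),
	)
}
```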
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
new file mode 100644
index 0000000..cbf1532
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
@@ -0,0 +1,46 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+)
+
+type clientReporter struct {
+	metrics     *ClientMetrics
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter {
+	r := &clientReporter{
+		metrics: m,
+		rpcType: rpcType,
+	}
+	if r.metrics.clientHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *clientReporter) ReceivedMessage() {
+	r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) SentMessage() {
+	r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) Handled(code codes.Code) {
+	r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if r.metrics.clientHandledHistogramEnabled {
+		r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile
new file mode 100644
index 0000000..74c0842
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile
@@ -0,0 +1,16 @@
+SHELL="/bin/bash"
+
+GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)
+
+all: vet fmt test
+
+fmt:
+	go fmt $(GOFILES_NOVENDOR)
+
+vet:
+	go vet $(GOFILES_NOVENDOR)
+
+test: vet
+	./scripts/test_all.sh
+
+.PHONY: all vet test
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
new file mode 100644
index 0000000..9d51aec
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
@@ -0,0 +1,41 @@
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+)
+
+// A CounterOption lets you add options to Counter metrics using With* funcs.
+type CounterOption func(*prom.CounterOpts)
+
+type counterOptions []CounterOption
+
+func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
+	for _, f := range co {
+		f(&o)
+	}
+	return o
+}
+
+// WithConstLabels allows you to add ConstLabels to Counter metrics.
+func WithConstLabels(labels prom.Labels) CounterOption {
+	return func(o *prom.CounterOpts) {
+		o.ConstLabels = labels
+	}
+}
+
+// A HistogramOption lets you add options to Histogram metrics using With*
+// funcs.
+type HistogramOption func(*prom.HistogramOpts)
+
+// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
+func WithHistogramBuckets(buckets []float64) HistogramOption {
+	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
+}
+
+// WithHistogramConstLabels allows you to add custom ConstLabels to
+// histograms metrics.
+func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
+	return func(o *prom.HistogramOpts) {
+		o.ConstLabels = labels
+	}
+}
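
A brief sketch of how these options compose with the metrics constructors; the label name and value are illustrative assumptions:

```go
package metrics

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
)

// newServerMetrics builds a ServerMetrics instance whose counters all carry a
// constant "app" label in addition to the standard grpc_* labels.
func newServerMetrics() *grpc_prometheus.ServerMetrics {
	return grpc_prometheus.NewServerMetrics(
		grpc_prometheus.WithConstLabels(prometheus.Labels{"app": "my-service"}),
	)
}
```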
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
new file mode 100644
index 0000000..322f990
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
@@ -0,0 +1,48 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for server-side gRPC.
+
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+)
+
+var (
+	// DefaultServerMetrics is the default instance of ServerMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+	// registry.
+	DefaultServerMetrics = NewServerMetrics()
+
+	// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()
+
+	// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
+	prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
+}
+
+// Register takes a gRPC server and pre-initializes all counters to 0. This
+// allows for easier monitoring in Prometheus (no missing metrics), and should
+// be called *after* all services have been registered with the server. This
+// function acts on the DefaultServerMetrics variable.
+func Register(server *grpc.Server) {
+	DefaultServerMetrics.InitializeMetrics(server)
+}
+
+// EnableHandlingTimeHistogram turns on recording of handling time
+// of RPCs. Histogram metrics can be very expensive for Prometheus
+// to retain and query. This function acts on the DefaultServerMetrics
+// variable and the default Prometheus metrics registry.
+func EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
+	prom.Register(DefaultServerMetrics.serverHandledHistogram)
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
new file mode 100644
index 0000000..5b1467e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
@@ -0,0 +1,185 @@
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/status"
+)
+
+// ServerMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC server.
+type ServerMetrics struct {
+	serverStartedCounter          *prom.CounterVec
+	serverHandledCounter          *prom.CounterVec
+	serverStreamMsgReceived       *prom.CounterVec
+	serverStreamMsgSent           *prom.CounterVec
+	serverHandledHistogramEnabled bool
+	serverHandledHistogramOpts    prom.HistogramOpts
+	serverHandledHistogram        *prom.HistogramVec
+}
+
+// NewServerMetrics returns a ServerMetrics object. Use a new instance of
+// ServerMetrics when not using the default Prometheus metrics registry, for
+// example when wanting to control which metrics are added to a registry as
+// opposed to automatically adding metrics via init functions.
+func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
+	opts := counterOptions(counterOpts)
+	return &ServerMetrics{
+		serverStartedCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_started_total",
+				Help: "Total number of RPCs started on the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverHandledCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_handled_total",
+				Help: "Total number of RPCs completed on the server, regardless of success or failure.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+		serverStreamMsgReceived: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_msg_received_total",
+				Help: "Total number of RPC stream messages received on the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverStreamMsgSent: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_msg_sent_total",
+				Help: "Total number of gRPC stream messages sent by the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverHandledHistogramEnabled: false,
+		serverHandledHistogramOpts: prom.HistogramOpts{
+			Name:    "grpc_server_handling_seconds",
+			Help:    "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
+			Buckets: prom.DefBuckets,
+		},
+		serverHandledHistogram: nil,
+	}
+}
+
+// EnableHandlingTimeHistogram enables histograms being registered when
+// registering the ServerMetrics on a Prometheus registry. Histograms can be
+// expensive on Prometheus servers. It takes options to configure histogram
+// options such as the defined buckets.
+func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&m.serverHandledHistogramOpts)
+	}
+	if !m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram = prom.NewHistogramVec(
+			m.serverHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+	}
+	m.serverHandledHistogramEnabled = true
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) {
+	m.serverStartedCounter.Describe(ch)
+	m.serverHandledCounter.Describe(ch)
+	m.serverStreamMsgReceived.Describe(ch)
+	m.serverStreamMsgSent.Describe(ch)
+	if m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram.Describe(ch)
+	}
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
+	m.serverStartedCounter.Collect(ch)
+	m.serverHandledCounter.Collect(ch)
+	m.serverStreamMsgReceived.Collect(ch)
+	m.serverStreamMsgSent.Collect(ch)
+	if m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram.Collect(ch)
+	}
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		monitor := newServerReporter(m, Unary, info.FullMethod)
+		monitor.ReceivedMessage()
+		resp, err := handler(ctx, req)
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		if err == nil {
+			monitor.SentMessage()
+		}
+		return resp, err
+	}
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
+		err := handler(srv, &monitoredServerStream{ss, monitor})
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		return err
+	}
+}
+
+// InitializeMetrics initializes all metrics, with their appropriate null
+// value, for all gRPC methods registered on a gRPC server. This is useful to
+// ensure that all metrics exist when collecting and querying.
+func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
+	serviceInfo := server.GetServiceInfo()
+	for serviceName, info := range serviceInfo {
+		for _, mInfo := range info.Methods {
+			preRegisterMethod(m, serviceName, &mInfo)
+		}
+	}
+}
+
+func streamRPCType(info *grpc.StreamServerInfo) grpcType {
+	if info.IsClientStream && !info.IsServerStream {
+		return ClientStream
+	} else if !info.IsClientStream && info.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredServerStream wraps grpc.ServerStream, allowing each Sent/Recv of a message to increment counters.
+type monitoredServerStream struct {
+	grpc.ServerStream
+	monitor *serverReporter
+}
+
+func (s *monitoredServerStream) SendMsg(m interface{}) error {
+	err := s.ServerStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m interface{}) error {
+	err := s.ServerStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	}
+	return err
+}
+
+// preRegisterMethod is invoked on Register of a Server, allowing all gRPC service labels to be pre-populated.
+func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
+	methodName := mInfo.Name
+	methodType := string(typeFromMethodInfo(mInfo))
+	// These are just references (no increments), as just referencing will create the labels but not set values.
+	metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	if metrics.serverHandledHistogramEnabled {
+		metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	}
+	for _, code := range allCodes {
+		metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
+	}
+}
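
Note: the following is a hedged usage sketch, not part of the vendored package. It shows one plausible way to wire the ServerMetrics interceptors above into a gRPC server and a Prometheus registry; NewServerMetrics is assumed from the rest of this package, and the prometheus and grpc imports are the standard client libraries.

package main

import (
	"net"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

func main() {
	// Create a dedicated ServerMetrics instance and enable the optional
	// handling-time histogram (assumed constructor from this package).
	metrics := grpc_prometheus.NewServerMetrics()
	metrics.EnableHandlingTimeHistogram()

	// Install the interceptors so every unary and streaming RPC is counted.
	server := grpc.NewServer(
		grpc.UnaryInterceptor(metrics.UnaryServerInterceptor()),
		grpc.StreamInterceptor(metrics.StreamServerInterceptor()),
	)

	// ... register gRPC services on server here ...

	// Pre-populate label combinations and expose the metrics; ServerMetrics
	// implements prometheus.Collector via the Describe/Collect methods above.
	metrics.InitializeMetrics(server)
	prometheus.MustRegister(metrics)

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		panic(err)
	}
	_ = server.Serve(lis)
}
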
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
new file mode 100644
index 0000000..aa9db54
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
@@ -0,0 +1,46 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+)
+
+type serverReporter struct {
+	metrics     *ServerMetrics
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter {
+	r := &serverReporter{
+		metrics: m,
+		rpcType: rpcType,
+	}
+	if r.metrics.serverHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *serverReporter) ReceivedMessage() {
+	r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) SentMessage() {
+	r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) Handled(code codes.Code) {
+	r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if r.metrics.serverHandledHistogramEnabled {
+		r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
new file mode 100644
index 0000000..7987de3
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
@@ -0,0 +1,50 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"strings"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+type grpcType string
+
+const (
+	Unary        grpcType = "unary"
+	ClientStream grpcType = "client_stream"
+	ServerStream grpcType = "server_stream"
+	BidiStream   grpcType = "bidi_stream"
+)
+
+var (
+	allCodes = []codes.Code{
+		codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
+		codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted,
+		codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal,
+		codes.Unavailable, codes.DataLoss,
+	}
+)
+
+func splitMethodName(fullMethodName string) (string, string) {
+	fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
+	if i := strings.Index(fullMethodName, "/"); i >= 0 {
+		return fullMethodName[:i], fullMethodName[i+1:]
+	}
+	return "unknown", "unknown"
+}
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+	if !mInfo.IsClientStream && !mInfo.IsServerStream {
+		return Unary
+	}
+	if mInfo.IsClientStream && !mInfo.IsServerStream {
+		return ClientStream
+	}
+	if !mInfo.IsClientStream && mInfo.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
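
Note: illustrative sketch only, not part of the vendored file. It shows how the unexported helpers above derive the label values used by the counters, so it would have to live inside the grpc_prometheus package; the method name is made up.

// Illustrative only: splitMethodName and typeFromMethodInfo are unexported,
// so this snippet assumes it sits inside the grpc_prometheus package.
func exampleLabelDerivation() {
	// The leading slash is trimmed and the remainder is split into the
	// grpc_service and grpc_method label values.
	service, method := splitMethodName("/helloworld.Greeter/SayHello")
	_ = service // "helloworld.Greeter"
	_ = method  // "SayHello"

	// A MethodInfo with neither streaming flag set maps to the "unary" type label.
	info := grpc.MethodInfo{Name: "SayHello"}
	_ = typeFromMethodInfo(&info) // Unary
}
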
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
new file mode 100644
index 0000000..3645162
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the name of Gengo, Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from this
+      software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel
new file mode 100644
index 0000000..d71991e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel
@@ -0,0 +1,44 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+    name = "options_proto_files",
+    srcs = [
+        "annotations.proto",
+        "openapiv2.proto",
+    ],
+)
+
+go_library(
+    name = "options",
+    embed = [":options_go_proto"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options",
+)
+
+proto_library(
+    name = "options_proto",
+    srcs = [
+        "annotations.proto",
+        "openapiv2.proto",
+    ],
+    deps = [
+        "@com_google_protobuf//:descriptor_proto",
+        "@com_google_protobuf//:struct_proto",
+    ],
+)
+
+go_proto_library(
+    name = "options_go_proto",
+    compilers = ["//:go_apiv2"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options",
+    proto = ":options_proto",
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":options",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go
new file mode 100644
index 0000000..738c975
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/annotations.proto
+
+//go:build !protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.FileOptions)(nil),
+		ExtensionType: (*Swagger)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger",
+		Tag:           "bytes,1042,opt,name=openapiv2_swagger",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MethodOptions)(nil),
+		ExtensionType: (*Operation)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation",
+		Tag:           "bytes,1042,opt,name=openapiv2_operation",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MessageOptions)(nil),
+		ExtensionType: (*Schema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema",
+		Tag:           "bytes,1042,opt,name=openapiv2_schema",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.EnumOptions)(nil),
+		ExtensionType: (*EnumSchema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum",
+		Tag:           "bytes,1042,opt,name=openapiv2_enum",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
+		ExtensionType: (*Tag)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag",
+		Tag:           "bytes,1042,opt,name=openapiv2_tag",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
+		ExtensionType: (*JSONSchema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field",
+		Tag:           "bytes,1042,opt,name=openapiv2_field",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042;
+	E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042;
+	E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042;
+	E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042;
+	E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042;
+	E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042;
+	E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5]
+)
+
+var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{
+	0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+	0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92,
+	0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+	0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75,
+	0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74,
+	0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+	0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68,
+	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73,
+	0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e,
+	0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{
+	(*descriptorpb.FileOptions)(nil),    // 0: google.protobuf.FileOptions
+	(*descriptorpb.MethodOptions)(nil),  // 1: google.protobuf.MethodOptions
+	(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+	(*descriptorpb.EnumOptions)(nil),    // 3: google.protobuf.EnumOptions
+	(*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions
+	(*descriptorpb.FieldOptions)(nil),   // 5: google.protobuf.FieldOptions
+	(*Swagger)(nil),                     // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	(*Operation)(nil),                   // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+	(*Schema)(nil),                      // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema
+	(*EnumSchema)(nil),                  // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	(*Tag)(nil),                         // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag
+	(*JSONSchema)(nil),                  // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+}
+var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{
+	0,  // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions
+	1,  // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions
+	2,  // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions
+	3,  // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions
+	4,  // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions
+	5,  // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions
+	6,  // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	7,  // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation
+	8,  // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+	9,  // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+	11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	12, // [12:12] is the sub-list for method output_type
+	12, // [12:12] is the sub-list for method input_type
+	6,  // [6:12] is the sub-list for extension type_name
+	0,  // [0:6] is the sub-list for extension extendee
+	0,  // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() }
+func file_protoc_gen_openapiv2_options_annotations_proto_init() {
+	if File_protoc_gen_openapiv2_options_annotations_proto != nil {
+		return
+	}
+	file_protoc_gen_openapiv2_options_openapiv2_proto_init()
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 6,
+			NumServices:   0,
+		},
+		GoTypes:           file_protoc_gen_openapiv2_options_annotations_proto_goTypes,
+		DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs,
+		ExtensionInfos:    file_protoc_gen_openapiv2_options_annotations_proto_extTypes,
+	}.Build()
+	File_protoc_gen_openapiv2_options_annotations_proto = out.File
+	file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil
+	file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil
+	file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto
new file mode 100644
index 0000000..aecc5e7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_openapiv2.options;
+
+import "google/protobuf/descriptor.proto";
+import "protoc-gen-openapiv2/options/openapiv2.proto";
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options";
+
+extend google.protobuf.FileOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Swagger openapiv2_swagger = 1042;
+}
+extend google.protobuf.MethodOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Operation openapiv2_operation = 1042;
+}
+extend google.protobuf.MessageOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Schema openapiv2_schema = 1042;
+}
+extend google.protobuf.EnumOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  EnumSchema openapiv2_enum = 1042;
+}
+extend google.protobuf.ServiceOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Tag openapiv2_tag = 1042;
+}
+extend google.protobuf.FieldOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  JSONSchema openapiv2_field = 1042;
+}
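
Note: a hedged Go sketch, not part of the vendored files, showing how the file-level annotation declared above can be read from a descriptor's options with the standard google.golang.org/protobuf/proto API; the package name and helper function are hypothetical.

package example // hypothetical package name, for illustration only

import (
	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// readSwaggerOption returns the openapiv2_swagger annotation (field 1042) set
// on a file's options, or nil when the annotation is absent.
func readSwaggerOption(fileOpts *descriptorpb.FileOptions) *options.Swagger {
	if fileOpts == nil || !proto.HasExtension(fileOpts, options.E_Openapiv2Swagger) {
		return nil
	}
	swagger, _ := proto.GetExtension(fileOpts, options.E_Openapiv2Swagger).(*options.Swagger)
	return swagger
}
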
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go
new file mode 100644
index 0000000..b570167
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/annotations.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.FileOptions)(nil),
+		ExtensionType: (*Swagger)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger",
+		Tag:           "bytes,1042,opt,name=openapiv2_swagger",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MethodOptions)(nil),
+		ExtensionType: (*Operation)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation",
+		Tag:           "bytes,1042,opt,name=openapiv2_operation",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MessageOptions)(nil),
+		ExtensionType: (*Schema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema",
+		Tag:           "bytes,1042,opt,name=openapiv2_schema",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.EnumOptions)(nil),
+		ExtensionType: (*EnumSchema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum",
+		Tag:           "bytes,1042,opt,name=openapiv2_enum",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
+		ExtensionType: (*Tag)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag",
+		Tag:           "bytes,1042,opt,name=openapiv2_tag",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
+		ExtensionType: (*JSONSchema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field",
+		Tag:           "bytes,1042,opt,name=openapiv2_field",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042;
+	E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042;
+	E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042;
+	E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042;
+	E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042;
+	E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042;
+	E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5]
+)
+
+var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{
+	0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+	0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92,
+	0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+	0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75,
+	0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74,
+	0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+	0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68,
+	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73,
+	0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e,
+	0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{
+	(*descriptorpb.FileOptions)(nil),    // 0: google.protobuf.FileOptions
+	(*descriptorpb.MethodOptions)(nil),  // 1: google.protobuf.MethodOptions
+	(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+	(*descriptorpb.EnumOptions)(nil),    // 3: google.protobuf.EnumOptions
+	(*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions
+	(*descriptorpb.FieldOptions)(nil),   // 5: google.protobuf.FieldOptions
+	(*Swagger)(nil),                     // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	(*Operation)(nil),                   // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+	(*Schema)(nil),                      // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema
+	(*EnumSchema)(nil),                  // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	(*Tag)(nil),                         // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag
+	(*JSONSchema)(nil),                  // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+}
+var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{
+	0,  // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions
+	1,  // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions
+	2,  // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions
+	3,  // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions
+	4,  // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions
+	5,  // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions
+	6,  // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	7,  // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation
+	8,  // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+	9,  // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+	11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	12, // [12:12] is the sub-list for method output_type
+	12, // [12:12] is the sub-list for method input_type
+	6,  // [6:12] is the sub-list for extension type_name
+	0,  // [0:6] is the sub-list for extension extendee
+	0,  // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() }
+func file_protoc_gen_openapiv2_options_annotations_proto_init() {
+	if File_protoc_gen_openapiv2_options_annotations_proto != nil {
+		return
+	}
+	file_protoc_gen_openapiv2_options_openapiv2_proto_init()
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 6,
+			NumServices:   0,
+		},
+		GoTypes:           file_protoc_gen_openapiv2_options_annotations_proto_goTypes,
+		DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs,
+		ExtensionInfos:    file_protoc_gen_openapiv2_options_annotations_proto_extTypes,
+	}.Build()
+	File_protoc_gen_openapiv2_options_annotations_proto = out.File
+	file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil
+	file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil
+	file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml
new file mode 100644
index 0000000..07dfb95
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml
@@ -0,0 +1,7 @@
+version: v2
+plugins:
+  - remote: buf.build/protocolbuffers/go:v1.36.0
+    out: .
+    opt:
+      - paths=source_relative
+      - default_api_level=API_HYBRID
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go
new file mode 100644
index 0000000..3a34e66
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go
@@ -0,0 +1,4263 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build !protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+	Scheme_UNKNOWN Scheme = 0
+	Scheme_HTTP    Scheme = 1
+	Scheme_HTTPS   Scheme = 2
+	Scheme_WS      Scheme = 3
+	Scheme_WSS     Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+	Scheme_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "HTTP",
+		2: "HTTPS",
+		3: "WS",
+		4: "WSS",
+	}
+	Scheme_value = map[string]int32{
+		"UNKNOWN": 0,
+		"HTTP":    1,
+		"HTTPS":   2,
+		"WS":      3,
+		"WSS":     4,
+	}
+)
+
+func (x Scheme) Enum() *Scheme {
+	p := new(Scheme)
+	*p = x
+	return p
+}
+
+func (x Scheme) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+	HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+	HeaderParameter_STRING  HeaderParameter_Type = 1
+	HeaderParameter_NUMBER  HeaderParameter_Type = 2
+	HeaderParameter_INTEGER HeaderParameter_Type = 3
+	HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
+
+// Enum value maps for HeaderParameter_Type.
+var (
+	HeaderParameter_Type_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "STRING",
+		2: "NUMBER",
+		3: "INTEGER",
+		4: "BOOLEAN",
+	}
+	HeaderParameter_Type_value = map[string]int32{
+		"UNKNOWN": 0,
+		"STRING":  1,
+		"NUMBER":  2,
+		"INTEGER": 3,
+		"BOOLEAN": 4,
+	}
+)
+
+func (x HeaderParameter_Type) Enum() *HeaderParameter_Type {
+	p := new(HeaderParameter_Type)
+	*p = x
+	return p
+}
+
+func (x HeaderParameter_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderParameter_Type) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1]
+}
+
+func (x HeaderParameter_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+	JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+	JSONSchema_ARRAY   JSONSchema_JSONSchemaSimpleTypes = 1
+	JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+	JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+	JSONSchema_NULL    JSONSchema_JSONSchemaSimpleTypes = 4
+	JSONSchema_NUMBER  JSONSchema_JSONSchemaSimpleTypes = 5
+	JSONSchema_OBJECT  JSONSchema_JSONSchemaSimpleTypes = 6
+	JSONSchema_STRING  JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+// Enum value maps for JSONSchema_JSONSchemaSimpleTypes.
+var (
+	JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "ARRAY",
+		2: "BOOLEAN",
+		3: "INTEGER",
+		4: "NULL",
+		5: "NUMBER",
+		6: "OBJECT",
+		7: "STRING",
+	}
+	JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+		"UNKNOWN": 0,
+		"ARRAY":   1,
+		"BOOLEAN": 2,
+		"INTEGER": 3,
+		"NULL":    4,
+		"NUMBER":  5,
+		"OBJECT":  6,
+		"STRING":  7,
+	}
+)
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes {
+	p := new(JSONSchema_JSONSchemaSimpleTypes)
+	*p = x
+	return p
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor()
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2]
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+	SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+	SecurityScheme_TYPE_BASIC   SecurityScheme_Type = 1
+	SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+	SecurityScheme_TYPE_OAUTH2  SecurityScheme_Type = 3
+)
+
+// Enum value maps for SecurityScheme_Type.
+var (
+	SecurityScheme_Type_name = map[int32]string{
+		0: "TYPE_INVALID",
+		1: "TYPE_BASIC",
+		2: "TYPE_API_KEY",
+		3: "TYPE_OAUTH2",
+	}
+	SecurityScheme_Type_value = map[string]int32{
+		"TYPE_INVALID": 0,
+		"TYPE_BASIC":   1,
+		"TYPE_API_KEY": 2,
+		"TYPE_OAUTH2":  3,
+	}
+)
+
+func (x SecurityScheme_Type) Enum() *SecurityScheme_Type {
+	p := new(SecurityScheme_Type)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor()
+}
+
+func (SecurityScheme_Type) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3]
+}
+
+func (x SecurityScheme_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+	SecurityScheme_IN_INVALID SecurityScheme_In = 0
+	SecurityScheme_IN_QUERY   SecurityScheme_In = 1
+	SecurityScheme_IN_HEADER  SecurityScheme_In = 2
+)
+
+// Enum value maps for SecurityScheme_In.
+var (
+	SecurityScheme_In_name = map[int32]string{
+		0: "IN_INVALID",
+		1: "IN_QUERY",
+		2: "IN_HEADER",
+	}
+	SecurityScheme_In_value = map[string]int32{
+		"IN_INVALID": 0,
+		"IN_QUERY":   1,
+		"IN_HEADER":  2,
+	}
+)
+
+func (x SecurityScheme_In) Enum() *SecurityScheme_In {
+	p := new(SecurityScheme_In)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_In) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor()
+}
+
+func (SecurityScheme_In) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4]
+}
+
+func (x SecurityScheme_In) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+	SecurityScheme_FLOW_INVALID     SecurityScheme_Flow = 0
+	SecurityScheme_FLOW_IMPLICIT    SecurityScheme_Flow = 1
+	SecurityScheme_FLOW_PASSWORD    SecurityScheme_Flow = 2
+	SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+	SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+// Enum value maps for SecurityScheme_Flow.
+var (
+	SecurityScheme_Flow_name = map[int32]string{
+		0: "FLOW_INVALID",
+		1: "FLOW_IMPLICIT",
+		2: "FLOW_PASSWORD",
+		3: "FLOW_APPLICATION",
+		4: "FLOW_ACCESS_CODE",
+	}
+	SecurityScheme_Flow_value = map[string]int32{
+		"FLOW_INVALID":     0,
+		"FLOW_IMPLICIT":    1,
+		"FLOW_PASSWORD":    2,
+		"FLOW_APPLICATION": 3,
+		"FLOW_ACCESS_CODE": 4,
+	}
+)
+
+func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow {
+	p := new(SecurityScheme_Flow)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_Flow) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor()
+}
+
+func (SecurityScheme_Flow) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5]
+}
+
+func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    title: "Echo API";
+//	    version: "1.0";
+//	    description: "";
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	  };
+//	  schemes: HTTPS;
+//	  consumes: "application/json";
+//	  produces: "application/json";
+//	};
+type Swagger struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Specifies the OpenAPI Specification version being used. It can be
+	// used by the OpenAPI UI and other clients to interpret the API listing. The
+	// value MUST be "2.0".
+	Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+	// Provides metadata about the API. The metadata can be used by the
+	// clients if needed.
+	Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+	// The host (name or ip) serving the API. This MUST be the host only and does
+	// not include the scheme nor sub-paths. It MAY include a port. If the host is
+	// not included, the host serving the documentation is to be used (including
+	// the port). The host does not support path templating.
+	Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+	// The base path on which the API is served, which is relative to the host. If
+	// it is not included, the API is served directly under the host. The value
+	// MUST start with a leading slash (/). The basePath does not support path
+	// templating.
+	// Note that using `base_path` does not change the endpoint paths that are
+	// generated in the resulting OpenAPI file. If you wish to use `base_path`
+	// with relatively generated OpenAPI paths, the `base_path` prefix must be
+	// manually removed from your `google.api.http` paths and your code changed to
+	// serve the API from the `base_path`.
+	BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+	// The transfer protocol of the API. Values MUST be from the list: "http",
+	// "https", "ws", "wss". If the schemes is not included, the default scheme to
+	// be used is the one used to access the OpenAPI definition itself.
+	Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+	// A list of MIME types the APIs can consume. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	// A list of MIME types the APIs can produce. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	// An object to hold responses that can be used across operations. This
+	// property does not define global responses for all operations.
+	Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Security scheme definitions that can be used across the specification.
+	SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+	// A declaration of which security schemes are applied for the API as a whole.
+	// The list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements).
+	// Individual operations can override this definition.
+	Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"`
+	// Additional external documentation.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Swagger) Reset() {
+	*x = Swagger{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Swagger) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Swagger) ProtoMessage() {}
+
+func (x *Swagger) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Swagger) GetSwagger() string {
+	if x != nil {
+		return x.Swagger
+	}
+	return ""
+}
+
+func (x *Swagger) GetInfo() *Info {
+	if x != nil {
+		return x.Info
+	}
+	return nil
+}
+
+func (x *Swagger) GetHost() string {
+	if x != nil {
+		return x.Host
+	}
+	return ""
+}
+
+func (x *Swagger) GetBasePath() string {
+	if x != nil {
+		return x.BasePath
+	}
+	return ""
+}
+
+func (x *Swagger) GetSchemes() []Scheme {
+	if x != nil {
+		return x.Schemes
+	}
+	return nil
+}
+
+func (x *Swagger) GetConsumes() []string {
+	if x != nil {
+		return x.Consumes
+	}
+	return nil
+}
+
+func (x *Swagger) GetProduces() []string {
+	if x != nil {
+		return x.Produces
+	}
+	return nil
+}
+
+func (x *Swagger) GetResponses() map[string]*Response {
+	if x != nil {
+		return x.Responses
+	}
+	return nil
+}
+
+func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+	if x != nil {
+		return x.SecurityDefinitions
+	}
+	return nil
+}
+
+func (x *Swagger) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		return x.Security
+	}
+	return nil
+}
+
+func (x *Swagger) GetTags() []*Tag {
+	if x != nil {
+		return x.Tags
+	}
+	return nil
+}
+
+func (x *Swagger) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Swagger) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Swagger) SetSwagger(v string) {
+	x.Swagger = v
+}
+
+func (x *Swagger) SetInfo(v *Info) {
+	x.Info = v
+}
+
+func (x *Swagger) SetHost(v string) {
+	x.Host = v
+}
+
+func (x *Swagger) SetBasePath(v string) {
+	x.BasePath = v
+}
+
+func (x *Swagger) SetSchemes(v []Scheme) {
+	x.Schemes = v
+}
+
+func (x *Swagger) SetConsumes(v []string) {
+	x.Consumes = v
+}
+
+func (x *Swagger) SetProduces(v []string) {
+	x.Produces = v
+}
+
+func (x *Swagger) SetResponses(v map[string]*Response) {
+	x.Responses = v
+}
+
+func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) {
+	x.SecurityDefinitions = v
+}
+
+func (x *Swagger) SetSecurity(v []*SecurityRequirement) {
+	x.Security = v
+}
+
+func (x *Swagger) SetTags(v []*Tag) {
+	x.Tags = v
+}
+
+func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *Swagger) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Swagger) HasInfo() bool {
+	if x == nil {
+		return false
+	}
+	return x.Info != nil
+}
+
+func (x *Swagger) HasSecurityDefinitions() bool {
+	if x == nil {
+		return false
+	}
+	return x.SecurityDefinitions != nil
+}
+
+func (x *Swagger) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *Swagger) ClearInfo() {
+	x.Info = nil
+}
+
+func (x *Swagger) ClearSecurityDefinitions() {
+	x.SecurityDefinitions = nil
+}
+
+func (x *Swagger) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+type Swagger_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Specifies the OpenAPI Specification version being used. It can be
+	// used by the OpenAPI UI and other clients to interpret the API listing. The
+	// value MUST be "2.0".
+	Swagger string
+	// Provides metadata about the API. The metadata can be used by the
+	// clients if needed.
+	Info *Info
+	// The host (name or ip) serving the API. This MUST be the host only and does
+	// not include the scheme nor sub-paths. It MAY include a port. If the host is
+	// not included, the host serving the documentation is to be used (including
+	// the port). The host does not support path templating.
+	Host string
+	// The base path on which the API is served, which is relative to the host. If
+	// it is not included, the API is served directly under the host. The value
+	// MUST start with a leading slash (/). The basePath does not support path
+	// templating.
+	// Note that using `base_path` does not change the endpoint paths that are
+	// generated in the resulting OpenAPI file. If you wish to use `base_path`
+	// with relatively generated OpenAPI paths, the `base_path` prefix must be
+	// manually removed from your `google.api.http` paths and your code changed to
+	// serve the API from the `base_path`.
+	BasePath string
+	// The transfer protocol of the API. Values MUST be from the list: "http",
+	// "https", "ws", "wss". If schemes is not included, the default scheme to
+	// be used is the one used to access the OpenAPI definition itself.
+	Schemes []Scheme
+	// A list of MIME types the APIs can consume. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Consumes []string
+	// A list of MIME types the APIs can produce. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Produces []string
+	// An object to hold responses that can be used across operations. This
+	// property does not define global responses for all operations.
+	Responses map[string]*Response
+	// Security scheme definitions that can be used across the specification.
+	SecurityDefinitions *SecurityDefinitions
+	// A declaration of which security schemes are applied for the API as a whole.
+	// The list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements).
+	// Individual operations can override this definition.
+	Security []*SecurityRequirement
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []*Tag
+	// Additional external documentation.
+	ExternalDocs *ExternalDocumentation
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Swagger_builder) Build() *Swagger {
+	m0 := &Swagger{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Swagger = b.Swagger
+	x.Info = b.Info
+	x.Host = b.Host
+	x.BasePath = b.BasePath
+	x.Schemes = b.Schemes
+	x.Consumes = b.Consumes
+	x.Produces = b.Produces
+	x.Responses = b.Responses
+	x.SecurityDefinitions = b.SecurityDefinitions
+	x.Security = b.Security
+	x.Tags = b.Tags
+	x.ExternalDocs = b.ExternalDocs
+	x.Extensions = b.Extensions
+	return m0
+}
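+
+// A minimal, illustrative sketch of constructing a Swagger value through
+// Swagger_builder rather than a struct literal; every field name used below
+// comes from the builder struct defined above:
+//
+//	doc := Swagger_builder{
+//		Swagger:  "2.0",
+//		Host:     "api.example.com",
+//		BasePath: "/v1",
+//		Consumes: []string{"application/json"},
+//		Produces: []string{"application/json"},
+//	}.Build()
+//	_ = doc.GetHost() // "api.example.com"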
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+//	service EchoService {
+//	  rpc Echo(SimpleMessage) returns (SimpleMessage) {
+//	    option (google.api.http) = {
+//	      get: "/v1/example/echo/{id}"
+//	    };
+//
+//	    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//	      summary: "Get a message.";
+//	      operation_id: "getMessage";
+//	      tags: "echo";
+//	      responses: {
+//	        key: "200"
+//	        value: {
+//	          description: "OK";
+//	        }
+//	      }
+//	    };
+//	  }
+//	}
+type Operation struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+	// A short summary of what the operation does. For maximum readability in the
+	// swagger-ui, this field SHOULD be less than 120 characters.
+	Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+	// A verbose explanation of the operation behavior. GFM syntax can be used for
+	// rich text representation.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// Additional external documentation for this operation.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// Unique string used to identify the operation. The id MUST be unique among
+	// all operations described in the API. Tools and libraries MAY use the
+	// operationId to uniquely identify an operation; it is therefore recommended
+	// to follow common programming naming conventions.
+	OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+	// A list of MIME types the operation can consume. This overrides the consumes
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	// A list of MIME types the operation can produce. This overrides the produces
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	// The list of possible responses as they are returned from executing this
+	// operation.
+	Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// The transfer protocol for the operation. Values MUST be from the list:
+	// "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+	// schemes definition.
+	Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+	// Declares this operation to be deprecated. Usage of the declared operation
+	// should be avoided. Default value is false.
+	Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+	// A declaration of which security schemes are applied for this operation. The
+	// list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements). This
+	// definition overrides any declared top-level security. To remove a top-level
+	// security declaration, an empty array can be used.
+	Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Custom parameters such as HTTP request headers.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/
+	// and https://swagger.io/specification/v2/#parameter-object.
+	Parameters    *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Operation) Reset() {
+	*x = Operation{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Operation) GetTags() []string {
+	if x != nil {
+		return x.Tags
+	}
+	return nil
+}
+
+func (x *Operation) GetSummary() string {
+	if x != nil {
+		return x.Summary
+	}
+	return ""
+}
+
+func (x *Operation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Operation) GetOperationId() string {
+	if x != nil {
+		return x.OperationId
+	}
+	return ""
+}
+
+func (x *Operation) GetConsumes() []string {
+	if x != nil {
+		return x.Consumes
+	}
+	return nil
+}
+
+func (x *Operation) GetProduces() []string {
+	if x != nil {
+		return x.Produces
+	}
+	return nil
+}
+
+func (x *Operation) GetResponses() map[string]*Response {
+	if x != nil {
+		return x.Responses
+	}
+	return nil
+}
+
+func (x *Operation) GetSchemes() []Scheme {
+	if x != nil {
+		return x.Schemes
+	}
+	return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+	if x != nil {
+		return x.Deprecated
+	}
+	return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		return x.Security
+	}
+	return nil
+}
+
+func (x *Operation) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Operation) GetParameters() *Parameters {
+	if x != nil {
+		return x.Parameters
+	}
+	return nil
+}
+
+func (x *Operation) SetTags(v []string) {
+	x.Tags = v
+}
+
+func (x *Operation) SetSummary(v string) {
+	x.Summary = v
+}
+
+func (x *Operation) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Operation) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *Operation) SetOperationId(v string) {
+	x.OperationId = v
+}
+
+func (x *Operation) SetConsumes(v []string) {
+	x.Consumes = v
+}
+
+func (x *Operation) SetProduces(v []string) {
+	x.Produces = v
+}
+
+func (x *Operation) SetResponses(v map[string]*Response) {
+	x.Responses = v
+}
+
+func (x *Operation) SetSchemes(v []Scheme) {
+	x.Schemes = v
+}
+
+func (x *Operation) SetDeprecated(v bool) {
+	x.Deprecated = v
+}
+
+func (x *Operation) SetSecurity(v []*SecurityRequirement) {
+	x.Security = v
+}
+
+func (x *Operation) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Operation) SetParameters(v *Parameters) {
+	x.Parameters = v
+}
+
+func (x *Operation) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *Operation) HasParameters() bool {
+	if x == nil {
+		return false
+	}
+	return x.Parameters != nil
+}
+
+func (x *Operation) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+func (x *Operation) ClearParameters() {
+	x.Parameters = nil
+}
+
+type Operation_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []string
+	// A short summary of what the operation does. For maximum readability in the
+	// swagger-ui, this field SHOULD be less than 120 characters.
+	Summary string
+	// A verbose explanation of the operation behavior. GFM syntax can be used for
+	// rich text representation.
+	Description string
+	// Additional external documentation for this operation.
+	ExternalDocs *ExternalDocumentation
+	// Unique string used to identify the operation. The id MUST be unique among
+	// all operations described in the API. Tools and libraries MAY use the
+	// operationId to uniquely identify an operation; it is therefore recommended
+	// to follow common programming naming conventions.
+	OperationId string
+	// A list of MIME types the operation can consume. This overrides the consumes
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Consumes []string
+	// A list of MIME types the operation can produce. This overrides the produces
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Produces []string
+	// The list of possible responses as they are returned from executing this
+	// operation.
+	Responses map[string]*Response
+	// The transfer protocol for the operation. Values MUST be from the list:
+	// "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+	// schemes definition.
+	Schemes []Scheme
+	// Declares this operation to be deprecated. Usage of the declared operation
+	// should be avoided. Default value is false.
+	Deprecated bool
+	// A declaration of which security schemes are applied for this operation. The
+	// list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements). This
+	// definition overrides any declared top-level security. To remove a top-level
+	// security declaration, an empty array can be used.
+	Security []*SecurityRequirement
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+	// Custom parameters such as HTTP request headers.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/
+	// and https://swagger.io/specification/v2/#parameter-object.
+	Parameters *Parameters
+}
+
+func (b0 Operation_builder) Build() *Operation {
+	m0 := &Operation{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Tags = b.Tags
+	x.Summary = b.Summary
+	x.Description = b.Description
+	x.ExternalDocs = b.ExternalDocs
+	x.OperationId = b.OperationId
+	x.Consumes = b.Consumes
+	x.Produces = b.Produces
+	x.Responses = b.Responses
+	x.Schemes = b.Schemes
+	x.Deprecated = b.Deprecated
+	x.Security = b.Security
+	x.Extensions = b.Extensions
+	x.Parameters = b.Parameters
+	return m0
+}
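+
+// A hedged usage sketch for Operation_builder; the "200" response reuses
+// Response_builder, which is declared later in this file, and all field names
+// come from the builder struct above:
+//
+//	op := Operation_builder{
+//		Summary:     "Get a message.",
+//		OperationId: "getMessage",
+//		Tags:        []string{"echo"},
+//		Responses: map[string]*Response{
+//			"200": Response_builder{Description: "OK"}.Build(),
+//		},
+//	}.Build()
+//	_ = op.GetOperationId() // "getMessage"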
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
+// allow header parameters to be set here since we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+type Parameters struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// `Headers` is one or more HTTP header parameters.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+	Headers       []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Parameters) Reset() {
+	*x = Parameters{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Parameters) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameters) ProtoMessage() {}
+
+func (x *Parameters) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Parameters) GetHeaders() []*HeaderParameter {
+	if x != nil {
+		return x.Headers
+	}
+	return nil
+}
+
+func (x *Parameters) SetHeaders(v []*HeaderParameter) {
+	x.Headers = v
+}
+
+type Parameters_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Headers` is one or more HTTP header parameters.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+	Headers []*HeaderParameter
+}
+
+func (b0 Parameters_builder) Build() *Parameters {
+	m0 := &Parameters{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Headers = b.Headers
+	return m0
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// `Name` is the header name.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// `Description` is a short description of the header.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	// See: https://swagger.io/specification/v2/#parameterType.
+	Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+	// `Format` is the extending format for the previously mentioned type.
+	Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+	// `Required` indicates whether the header is required.
+	Required      bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *HeaderParameter) Reset() {
+	*x = HeaderParameter{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *HeaderParameter) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+	if x != nil {
+		return x.Type
+	}
+	return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *HeaderParameter) SetName(v string) {
+	x.Name = v
+}
+
+func (x *HeaderParameter) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *HeaderParameter) SetType(v HeaderParameter_Type) {
+	x.Type = v
+}
+
+func (x *HeaderParameter) SetFormat(v string) {
+	x.Format = v
+}
+
+func (x *HeaderParameter) SetRequired(v bool) {
+	x.Required = v
+}
+
+type HeaderParameter_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Name` is the header name.
+	Name string
+	// `Description` is a short description of the header.
+	Description string
+	// `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	// See: https://swagger.io/specification/v2/#parameterType.
+	Type HeaderParameter_Type
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// `Required` indicates whether the header is required.
+	Required bool
+}
+
+func (b0 HeaderParameter_builder) Build() *HeaderParameter {
+	m0 := &HeaderParameter{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Name = b.Name
+	x.Description = b.Description
+	x.Type = b.Type
+	x.Format = b.Format
+	x.Required = b.Required
+	return m0
+}
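+
+// An illustrative sketch combining Parameters_builder and
+// HeaderParameter_builder. The Type field is left at its default here because
+// only the HeaderParameter_UNKNOWN constant is visible at this point in the
+// file; the other enum values are generated elsewhere. Header name and
+// description are sample values:
+//
+//	params := Parameters_builder{
+//		Headers: []*HeaderParameter{
+//			HeaderParameter_builder{
+//				Name:        "X-Request-Id",
+//				Description: "Correlation id for the request.",
+//				Required:    true,
+//			}.Build(),
+//		},
+//	}.Build()
+//	_ = params.GetHeaders()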
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// `Description` is a short description of the header.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+	// `Format` is the extending format for the previously mentioned type.
+	Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+	// `Default` declares the value of the header that the server will use if none is provided.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+	// Unlike JSON Schema this value MUST conform to the defined type for the header.
+	Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"`
+	// `Pattern` See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+	Pattern       string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Header) Reset() {
+	*x = Header{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Header) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Header) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Header) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *Header) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *Header) GetDefault() string {
+	if x != nil {
+		return x.Default
+	}
+	return ""
+}
+
+func (x *Header) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *Header) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Header) SetType(v string) {
+	x.Type = v
+}
+
+func (x *Header) SetFormat(v string) {
+	x.Format = v
+}
+
+func (x *Header) SetDefault(v string) {
+	x.Default = v
+}
+
+func (x *Header) SetPattern(v string) {
+	x.Pattern = v
+}
+
+type Header_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Description` is a short description of the header.
+	Description string
+	// The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	Type string
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// `Default` declares the value of the header that the server will use if none is provided.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+	// Unlike JSON Schema this value MUST conform to the defined type for the header.
+	Default string
+	// `Pattern` See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+	Pattern string
+}
+
+func (b0 Header_builder) Build() *Header {
+	m0 := &Header{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Type = b.Type
+	x.Format = b.Format
+	x.Default = b.Default
+	x.Pattern = b.Pattern
+	return m0
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+type Response struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// `Description` is a short description of the response.
+	// GFM syntax can be used for rich text representation.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// `Schema` optionally defines the structure of the response.
+	// If `Schema` is not provided, it means there is no content to the response.
+	Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+	// `Headers` is a list of headers that are sent with the response.
+	// `Header` name is expected to be a string in the canonical format of the MIME header key.
+	// See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+	Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// `Examples` gives per-mimetype response examples.
+	// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+	Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Response) Reset() {
+	*x = Response{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Response) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Response) GetSchema() *Schema {
+	if x != nil {
+		return x.Schema
+	}
+	return nil
+}
+
+func (x *Response) GetHeaders() map[string]*Header {
+	if x != nil {
+		return x.Headers
+	}
+	return nil
+}
+
+func (x *Response) GetExamples() map[string]string {
+	if x != nil {
+		return x.Examples
+	}
+	return nil
+}
+
+func (x *Response) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Response) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Response) SetSchema(v *Schema) {
+	x.Schema = v
+}
+
+func (x *Response) SetHeaders(v map[string]*Header) {
+	x.Headers = v
+}
+
+func (x *Response) SetExamples(v map[string]string) {
+	x.Examples = v
+}
+
+func (x *Response) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Response) HasSchema() bool {
+	if x == nil {
+		return false
+	}
+	return x.Schema != nil
+}
+
+func (x *Response) ClearSchema() {
+	x.Schema = nil
+}
+
+type Response_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Description` is a short description of the response.
+	// GFM syntax can be used for rich text representation.
+	Description string
+	// `Schema` optionally defines the structure of the response.
+	// If `Schema` is not provided, it means there is no content to the response.
+	Schema *Schema
+	// `Headers` is a list of headers that are sent with the response.
+	// `Header` name is expected to be a string in the canonical format of the MIME header key.
+	// See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+	Headers map[string]*Header
+	// `Examples` gives per-mimetype response examples.
+	// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+	Examples map[string]string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Response_builder) Build() *Response {
+	m0 := &Response{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Schema = b.Schema
+	x.Headers = b.Headers
+	x.Examples = b.Examples
+	x.Extensions = b.Extensions
+	return m0
+}
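+
+// A short sketch of Response_builder using the Headers and Examples maps;
+// Header_builder is defined above and the map keys follow the conventions in
+// the field comments (canonical MIME header keys, mimetype strings):
+//
+//	resp := Response_builder{
+//		Description: "OK",
+//		Headers: map[string]*Header{
+//			"X-Rate-Limit": Header_builder{Type: "integer", Format: "int32"}.Build(),
+//		},
+//		Examples: map[string]string{
+//			"application/json": `{"status": "ok"}`,
+//		},
+//	}.Build()
+//	_ = resp.GetExamples()["application/json"]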
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    title: "Echo API";
+//	    version: "1.0";
+//	    description: "";
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	  };
+//	  ...
+//	};
+type Info struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The title of the application.
+	Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+	// A short description of the application. GFM syntax can be used for rich
+	// text representation.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The Terms of Service for the API.
+	TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+	// The contact information for the exposed API.
+	Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+	// The license information for the exposed API.
+	License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+	// Provides the version of the application API (not to be confused
+	// with the specification version).
+	Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Info) Reset() {
+	*x = Info{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Info) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Info) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *Info) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+	if x != nil {
+		return x.TermsOfService
+	}
+	return ""
+}
+
+func (x *Info) GetContact() *Contact {
+	if x != nil {
+		return x.Contact
+	}
+	return nil
+}
+
+func (x *Info) GetLicense() *License {
+	if x != nil {
+		return x.License
+	}
+	return nil
+}
+
+func (x *Info) GetVersion() string {
+	if x != nil {
+		return x.Version
+	}
+	return ""
+}
+
+func (x *Info) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Info) SetTitle(v string) {
+	x.Title = v
+}
+
+func (x *Info) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Info) SetTermsOfService(v string) {
+	x.TermsOfService = v
+}
+
+func (x *Info) SetContact(v *Contact) {
+	x.Contact = v
+}
+
+func (x *Info) SetLicense(v *License) {
+	x.License = v
+}
+
+func (x *Info) SetVersion(v string) {
+	x.Version = v
+}
+
+func (x *Info) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Info) HasContact() bool {
+	if x == nil {
+		return false
+	}
+	return x.Contact != nil
+}
+
+func (x *Info) HasLicense() bool {
+	if x == nil {
+		return false
+	}
+	return x.License != nil
+}
+
+func (x *Info) ClearContact() {
+	x.Contact = nil
+}
+
+func (x *Info) ClearLicense() {
+	x.License = nil
+}
+
+type Info_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The title of the application.
+	Title string
+	// A short description of the application. GFM syntax can be used for rich
+	// text representation.
+	Description string
+	// The Terms of Service for the API.
+	TermsOfService string
+	// The contact information for the exposed API.
+	Contact *Contact
+	// The license information for the exposed API.
+	License *License
+	// Provides the version of the application API (not to be confused
+	// with the specification version).
+	Version string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Info_builder) Build() *Info {
+	m0 := &Info{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Title = b.Title
+	x.Description = b.Description
+	x.TermsOfService = b.TermsOfService
+	x.Contact = b.Contact
+	x.License = b.License
+	x.Version = b.Version
+	x.Extensions = b.Extensions
+	return m0
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    ...
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    ...
+//	  };
+//	  ...
+//	};
+type Contact struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The identifying name of the contact person/organization.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// The URL pointing to the contact information. MUST be in the format of a
+	// URL.
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	// The email address of the contact person/organization. MUST be in the format
+	// of an email address.
+	Email         string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Contact) Reset() {
+	*x = Contact{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Contact) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Contact) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Contact) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *Contact) GetEmail() string {
+	if x != nil {
+		return x.Email
+	}
+	return ""
+}
+
+func (x *Contact) SetName(v string) {
+	x.Name = v
+}
+
+func (x *Contact) SetUrl(v string) {
+	x.Url = v
+}
+
+func (x *Contact) SetEmail(v string) {
+	x.Email = v
+}
+
+type Contact_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The identifying name of the contact person/organization.
+	Name string
+	// The URL pointing to the contact information. MUST be in the format of a
+	// URL.
+	Url string
+	// The email address of the contact person/organization. MUST be in the format
+	// of an email address.
+	Email string
+}
+
+func (b0 Contact_builder) Build() *Contact {
+	m0 := &Contact{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Name = b.Name
+	x.Url = b.Url
+	x.Email = b.Email
+	return m0
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    ...
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	    ...
+//	  };
+//	  ...
+//	};
+type License struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The license name used for the API.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// A URL to the license used for the API. MUST be in the format of a URL.
+	Url           string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *License) Reset() {
+	*x = License{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *License) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *License) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *License) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *License) SetName(v string) {
+	x.Name = v
+}
+
+func (x *License) SetUrl(v string) {
+	x.Url = v
+}
+
+type License_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The license name used for the API.
+	Name string
+	// A URL to the license used for the API. MUST be in the format of a URL.
+	Url string
+}
+
+func (b0 License_builder) Build() *License {
+	m0 := &License{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Name = b.Name
+	x.Url = b.Url
+	return m0
+}
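+
+// An illustrative sketch (not generated code) wiring Contact and License into
+// an Info value with the builders defined above; the sample values mirror the
+// openapiv2_swagger example in the Info message comment:
+//
+//	info := Info_builder{
+//		Title:   "Echo API",
+//		Version: "1.0",
+//		Contact: Contact_builder{
+//			Name:  "gRPC-Gateway project",
+//			Url:   "https://github.com/grpc-ecosystem/grpc-gateway",
+//			Email: "none@example.com",
+//		}.Build(),
+//		License: License_builder{
+//			Name: "BSD 3-Clause License",
+//			Url:  "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE",
+//		}.Build(),
+//	}.Build()
+//	_ = info.HasLicense() // true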
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  ...
+//	  external_docs: {
+//	    description: "More about gRPC-Gateway";
+//	    url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	  }
+//	  ...
+//	};
+type ExternalDocumentation struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// A short description of the target documentation. GFM syntax can be used for
+	// rich text representation.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The URL for the target documentation. Value MUST be in the format
+	// of a URL.
+	Url           string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *ExternalDocumentation) Reset() {
+	*x = ExternalDocumentation{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExternalDocumentation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocumentation) ProtoMessage() {}
+
+func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *ExternalDocumentation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *ExternalDocumentation) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *ExternalDocumentation) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *ExternalDocumentation) SetUrl(v string) {
+	x.Url = v
+}
+
+type ExternalDocumentation_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A short description of the target documentation. GFM syntax can be used for
+	// rich text representation.
+	Description string
+	// The URL for the target documentation. Value MUST be in the format
+	// of a URL.
+	Url string
+}
+
+func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation {
+	m0 := &ExternalDocumentation{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Url = b.Url
+	return m0
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+type Schema struct {
+	state      protoimpl.MessageState `protogen:"hybrid.v1"`
+	JsonSchema *JSONSchema            `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+	// Adds support for polymorphism. The discriminator is the schema property
+	// name that is used to differentiate between other schema that inherit this
+	// schema. The property name used MUST be defined at this schema and it MUST
+	// be in the required property list. When used, the value MUST be the name of
+	// this schema or any schema that inherits it.
+	Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+	// Relevant only for Schema "properties" definitions. Declares the property as
+	// "read only". This means that it MAY be sent as part of a response but MUST
+	// NOT be sent as part of the request. Properties marked as readOnly being
+	// true SHOULD NOT be in the required list of the defined schema. Default
+	// value is false.
+	ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// A free-form property to include an example of an instance for this schema in JSON.
+	// This is copied verbatim to the output.
+	Example       string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Schema) Reset() {
+	*x = Schema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Schema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Schema) GetJsonSchema() *JSONSchema {
+	if x != nil {
+		return x.JsonSchema
+	}
+	return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+	if x != nil {
+		return x.Discriminator
+	}
+	return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
+	}
+	return false
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Schema) GetExample() string {
+	if x != nil {
+		return x.Example
+	}
+	return ""
+}
+
+func (x *Schema) SetJsonSchema(v *JSONSchema) {
+	x.JsonSchema = v
+}
+
+func (x *Schema) SetDiscriminator(v string) {
+	x.Discriminator = v
+}
+
+func (x *Schema) SetReadOnly(v bool) {
+	x.ReadOnly = v
+}
+
+func (x *Schema) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *Schema) SetExample(v string) {
+	x.Example = v
+}
+
+func (x *Schema) HasJsonSchema() bool {
+	if x == nil {
+		return false
+	}
+	return x.JsonSchema != nil
+}
+
+func (x *Schema) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *Schema) ClearJsonSchema() {
+	x.JsonSchema = nil
+}
+
+func (x *Schema) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+type Schema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	JsonSchema *JSONSchema
+	// Adds support for polymorphism. The discriminator is the schema property
+	// name that is used to differentiate between other schema that inherit this
+	// schema. The property name used MUST be defined at this schema and it MUST
+	// be in the required property list. When used, the value MUST be the name of
+	// this schema or any schema that inherits it.
+	Discriminator string
+	// Relevant only for Schema "properties" definitions. Declares the property as
+	// "read only". This means that it MAY be sent as part of a response but MUST
+	// NOT be sent as part of the request. Properties marked as readOnly being
+	// true SHOULD NOT be in the required list of the defined schema. Default
+	// value is false.
+	ReadOnly bool
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation
+	// A free-form property to include an example of an instance for this schema in JSON.
+	// This is copied verbatim to the output.
+	Example string
+}
+
+func (b0 Schema_builder) Build() *Schema {
+	m0 := &Schema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.JsonSchema = b.JsonSchema
+	x.Discriminator = b.Discriminator
+	x.ReadOnly = b.ReadOnly
+	x.ExternalDocs = b.ExternalDocs
+	x.Example = b.Example
+	return m0
+}
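+
+// A hedged sketch of Schema_builder. JSONSchema and its builder are declared
+// further down in this file, so JsonSchema is simply left unset here and only
+// fields already visible above are populated:
+//
+//	schema := Schema_builder{
+//		Discriminator: "petType",
+//		ReadOnly:      true,
+//		Example:       `{"petType": "Cat"}`,
+//	}.Build()
+//	_ = schema.HasJsonSchema() // false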
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+//	  ...
+//	  title: "MyEnum";
+//	  description:"This is my nice enum";
+//	  example: "ZERO";
+//	  required: true;
+//	  ...
+//	};
+type EnumSchema struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// A short description of the schema.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	Default     string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+	// The title of the schema.
+	Title    string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+	Required bool   `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+	ReadOnly bool   `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	Example      string                 `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+	*x = EnumSchema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+	if x != nil {
+		return x.Default
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
+	}
+	return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+	if x != nil {
+		return x.Example
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+	x.Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+	x.Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+	x.Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+	x.ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+	x.Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+	x.Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A short description of the schema.
+	Description string
+	Default     string
+	// The title of the schema.
+	Title    string
+	Required bool
+	ReadOnly bool
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation
+	Example      string
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+	m0 := &EnumSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Default = b.Default
+	x.Title = b.Title
+	x.Required = b.Required
+	x.ReadOnly = b.ReadOnly
+	x.ExternalDocs = b.ExternalDocs
+	x.Example = b.Example
+	x.Ref = b.Ref
+	x.Extensions = b.Extensions
+	return m0
+}
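+
+// A minimal usage sketch, assuming only the EnumSchema_builder type declared
+// above: the field values ("MyEnum", "ZERO", ...) are illustrative, and any
+// field left unset keeps its zero value.
+//
+//	enumSchema := EnumSchema_builder{
+//		Title:       "MyEnum",
+//		Description: "This is my nice enum",
+//		Example:     "ZERO",
+//		Required:    true,
+//	}.Build()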
+
+// `JSONSchema` represents properties from JSON Schema taken, and as used, in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//	message SimpleMessage {
+//	  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//	    json_schema: {
+//	      title: "SimpleMessage"
+//	      description: "A simple message."
+//	      required: ["id"]
+//	    }
+//	  };
+//
+//	  // Id represents the message identifier.
+//	  string id = 1 [
+//	      (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//	        description: "The unique identifier of the simple message."
+//	      }];
+//	}
+type JSONSchema struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	// The title of the schema.
+	Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+	// A short description of the schema.
+	Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	Default     string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+	ReadOnly    bool   `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	// A free-form property to include a JSON example of this field. This is copied
+	// verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject  https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+	Example    string  `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value of Maximum MUST be a number.
+	Maximum          float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool    `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value of Minimum MUST be a number.
+	Minimum          float64  `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool     `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength        uint64   `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength        uint64   `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern          string   `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems         uint64   `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems         uint64   `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems      bool     `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	MaxProperties    uint64   `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+	MinProperties    uint64   `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+	Required         []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+	// Items in 'array' must be unique.
+	Array []string                           `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+	Type  []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+	// `Format`
+	Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+	// Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+	Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+	// Additional field level properties used when generating the OpenAPI v2 file.
+	FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *JSONSchema) Reset() {
+	*x = JSONSchema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema) ProtoMessage() {}
+
+func (x *JSONSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetDefault() string {
+	if x != nil {
+		return x.Default
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
+	}
+	return false
+}
+
+func (x *JSONSchema) GetExample() string {
+	if x != nil {
+		return x.Example
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMaxLength() uint64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinLength() uint64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetMaxItems() uint64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinItems() uint64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMaxProperties() uint64 {
+	if x != nil {
+		return x.MaxProperties
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinProperties() uint64 {
+	if x != nil {
+		return x.MinProperties
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetRequired() []string {
+	if x != nil {
+		return x.Required
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetArray() []string {
+	if x != nil {
+		return x.Array
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+	if x != nil {
+		return x.Type
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetEnum() []string {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration {
+	if x != nil {
+		return x.FieldConfiguration
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *JSONSchema) SetRef(v string) {
+	x.Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+	x.Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+	x.Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+	x.ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+	x.Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+	x.MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+	x.Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+	x.ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+	x.Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+	x.ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+	x.MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+	x.MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+	x.Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+	x.MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+	x.MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+	x.UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+	x.MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+	x.MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+	x.Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+	x.Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+	x.Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+	x.Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+	x.Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+	x.FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+	if x == nil {
+		return false
+	}
+	return x.FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+	x.FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string
+	// The title of the schema.
+	Title string
+	// A short description of the schema.
+	Description string
+	Default     string
+	ReadOnly    bool
+	// A free-form property to include a JSON example of this field. This is copied
+	// verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject  https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+	Example    string
+	MultipleOf float64
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value of Maximum MUST be a number.
+	Maximum          float64
+	ExclusiveMaximum bool
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value of Minimum MUST be a number.
+	Minimum          float64
+	ExclusiveMinimum bool
+	MaxLength        uint64
+	MinLength        uint64
+	Pattern          string
+	MaxItems         uint64
+	MinItems         uint64
+	UniqueItems      bool
+	MaxProperties    uint64
+	MinProperties    uint64
+	Required         []string
+	// Items in 'array' must be unique.
+	Array []string
+	Type  []JSONSchema_JSONSchemaSimpleTypes
+	// `Format`
+	Format string
+	// Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+	Enum []string
+	// Additional field level properties used when generating the OpenAPI v2 file.
+	FieldConfiguration *JSONSchema_FieldConfiguration
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+	m0 := &JSONSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Ref = b.Ref
+	x.Title = b.Title
+	x.Description = b.Description
+	x.Default = b.Default
+	x.ReadOnly = b.ReadOnly
+	x.Example = b.Example
+	x.MultipleOf = b.MultipleOf
+	x.Maximum = b.Maximum
+	x.ExclusiveMaximum = b.ExclusiveMaximum
+	x.Minimum = b.Minimum
+	x.ExclusiveMinimum = b.ExclusiveMinimum
+	x.MaxLength = b.MaxLength
+	x.MinLength = b.MinLength
+	x.Pattern = b.Pattern
+	x.MaxItems = b.MaxItems
+	x.MinItems = b.MinItems
+	x.UniqueItems = b.UniqueItems
+	x.MaxProperties = b.MaxProperties
+	x.MinProperties = b.MinProperties
+	x.Required = b.Required
+	x.Array = b.Array
+	x.Type = b.Type
+	x.Format = b.Format
+	x.Enum = b.Enum
+	x.FieldConfiguration = b.FieldConfiguration
+	x.Extensions = b.Extensions
+	return m0
+}
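+
+// A minimal usage sketch, assuming only the JSONSchema_builder type declared
+// above: it builds the schema shown in the SimpleMessage example; the field
+// values are illustrative and unset fields keep their zero values.
+//
+//	jsonSchema := JSONSchema_builder{
+//		Title:       "SimpleMessage",
+//		Description: "A simple message.",
+//		Required:    []string{"id"},
+//	}.Build()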
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The name of the tag. Use it to override the name of a global Tag object,
+	// then use that name to reference the tag throughout the OpenAPI file.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// A short description for the tag. GFM syntax can be used for rich text
+	// representation.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// Additional external documentation for this tag.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Tag) Reset() {
+	*x = Tag{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Tag) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Tag) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Tag) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Tag) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Tag) SetName(v string) {
+	x.Name = v
+}
+
+func (x *Tag) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Tag) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *Tag) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Tag) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *Tag) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+type Tag_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The name of the tag. Use it to override the name of a global Tag object,
+	// then use that name to reference the tag throughout the OpenAPI file.
+	Name string
+	// A short description for the tag. GFM syntax can be used for rich text
+	// representation.
+	Description string
+	// Additional external documentation for this tag.
+	ExternalDocs *ExternalDocumentation
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Tag_builder) Build() *Tag {
+	m0 := &Tag{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Name = b.Name
+	x.Description = b.Description
+	x.ExternalDocs = b.ExternalDocs
+	x.Extensions = b.Extensions
+	return m0
+}
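+
+// A minimal usage sketch, assuming only the Tag_builder type declared above:
+// the name and description are illustrative only.
+//
+//	tag := Tag_builder{
+//		Name:        "Users",
+//		Description: "Operations on user resources",
+//	}.Build()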
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// A single security scheme definition, mapping a "name" to the scheme it
+	// defines.
+	Security      map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *SecurityDefinitions) Reset() {
+	*x = SecurityDefinitions{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityDefinitions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+	if x != nil {
+		return x.Security
+	}
+	return nil
+}
+
+func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) {
+	x.Security = v
+}
+
+type SecurityDefinitions_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A single security scheme definition, mapping a "name" to the scheme it
+	// defines.
+	Security map[string]*SecurityScheme
+}
+
+func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions {
+	m0 := &SecurityDefinitions{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Security = b.Security
+	return m0
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The type of the security scheme. Valid values are "basic",
+	// "apiKey" or "oauth2".
+	Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"`
+	// A short description for the security scheme.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The name of the header or query parameter to be used.
+	// Valid for apiKey.
+	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	// The location of the API key. Valid values are "query" or
+	// "header".
+	// Valid for apiKey.
+	In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"`
+	// The flow used by the OAuth2 security scheme. Valid values are
+	// "implicit", "password", "application" or "accessCode".
+	// Valid for oauth2.
+	Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"`
+	// The authorization URL to be used for this flow. This SHOULD be in
+	// the form of a URL.
+	// Valid for oauth2/implicit and oauth2/accessCode.
+	AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	// The token URL to be used for this flow. This SHOULD be in the
+	// form of a URL.
+	// Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+	TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	// The available scopes for the OAuth2 security scheme.
+	// Valid for oauth2.
+	Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *SecurityScheme) Reset() {
+	*x = SecurityScheme{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityScheme) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityScheme) ProtoMessage() {}
+
+func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityScheme) GetType() SecurityScheme_Type {
+	if x != nil {
+		return x.Type
+	}
+	return SecurityScheme_TYPE_INVALID
+}
+
+func (x *SecurityScheme) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetIn() SecurityScheme_In {
+	if x != nil {
+		return x.In
+	}
+	return SecurityScheme_IN_INVALID
+}
+
+func (x *SecurityScheme) GetFlow() SecurityScheme_Flow {
+	if x != nil {
+		return x.Flow
+	}
+	return SecurityScheme_FLOW_INVALID
+}
+
+func (x *SecurityScheme) GetAuthorizationUrl() string {
+	if x != nil {
+		return x.AuthorizationUrl
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetTokenUrl() string {
+	if x != nil {
+		return x.TokenUrl
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetScopes() *Scopes {
+	if x != nil {
+		return x.Scopes
+	}
+	return nil
+}
+
+func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *SecurityScheme) SetType(v SecurityScheme_Type) {
+	x.Type = v
+}
+
+func (x *SecurityScheme) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *SecurityScheme) SetName(v string) {
+	x.Name = v
+}
+
+func (x *SecurityScheme) SetIn(v SecurityScheme_In) {
+	x.In = v
+}
+
+func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) {
+	x.Flow = v
+}
+
+func (x *SecurityScheme) SetAuthorizationUrl(v string) {
+	x.AuthorizationUrl = v
+}
+
+func (x *SecurityScheme) SetTokenUrl(v string) {
+	x.TokenUrl = v
+}
+
+func (x *SecurityScheme) SetScopes(v *Scopes) {
+	x.Scopes = v
+}
+
+func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *SecurityScheme) HasScopes() bool {
+	if x == nil {
+		return false
+	}
+	return x.Scopes != nil
+}
+
+func (x *SecurityScheme) ClearScopes() {
+	x.Scopes = nil
+}
+
+type SecurityScheme_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The type of the security scheme. Valid values are "basic",
+	// "apiKey" or "oauth2".
+	Type SecurityScheme_Type
+	// A short description for the security scheme.
+	Description string
+	// The name of the header or query parameter to be used.
+	// Valid for apiKey.
+	Name string
+	// The location of the API key. Valid values are "query" or
+	// "header".
+	// Valid for apiKey.
+	In SecurityScheme_In
+	// The flow used by the OAuth2 security scheme. Valid values are
+	// "implicit", "password", "application" or "accessCode".
+	// Valid for oauth2.
+	Flow SecurityScheme_Flow
+	// The authorization URL to be used for this flow. This SHOULD be in
+	// the form of a URL.
+	// Valid for oauth2/implicit and oauth2/accessCode.
+	AuthorizationUrl string
+	// The token URL to be used for this flow. This SHOULD be in the
+	// form of a URL.
+	// Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+	TokenUrl string
+	// The available scopes for the OAuth2 security scheme.
+	// Valid for oauth2.
+	Scopes *Scopes
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 SecurityScheme_builder) Build() *SecurityScheme {
+	m0 := &SecurityScheme{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Type = b.Type
+	x.Description = b.Description
+	x.Name = b.Name
+	x.In = b.In
+	x.Flow = b.Flow
+	x.AuthorizationUrl = b.AuthorizationUrl
+	x.TokenUrl = b.TokenUrl
+	x.Scopes = b.Scopes
+	x.Extensions = b.Extensions
+	return m0
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Each name must correspond to a security scheme which is declared in
+	// the Security Definitions. If the security scheme is of type "oauth2",
+	// then the value is a list of scope names required for the execution.
+	// For other security scheme types, the array MUST be empty.
+	SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+	*x = SecurityRequirement{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+	if x != nil {
+		return x.SecurityRequirement
+	}
+	return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+	x.SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Each name must correspond to a security scheme which is declared in
+	// the Security Definitions. If the security scheme is of type "oauth2",
+	// then the value is a list of scope names required for the execution.
+	// For other security scheme types, the array MUST be empty.
+	SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+	m0 := &SecurityRequirement{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.SecurityRequirement = b.SecurityRequirement
+	return m0
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Maps the name of a scope to a short description of it (as the value
+	// of the property).
+	Scope         map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+	*x = Scopes{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+	if x != nil {
+		return x.Scope
+	}
+	return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+	x.Scope = v
+}
+
+type Scopes_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Maps the name of a scope to a short description of it (as the value
+	// of the property).
+	Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+	m0 := &Scopes{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Scope = b.Scope
+	return m0
+}
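+
+// A minimal usage sketch, assuming only the builder types declared above and
+// the TYPE_OAUTH2/FLOW_ACCESS_CODE constants of the Type and Flow enums
+// defined earlier in this file: it wires an OAuth2 SecurityScheme and its
+// Scopes into a SecurityDefinitions entry. The scheme name "OAuth2", the URLs
+// and the scope names are illustrative only.
+//
+//	scopes := Scopes_builder{
+//		Scope: map[string]string{"read": "Grants read access"},
+//	}.Build()
+//	scheme := SecurityScheme_builder{
+//		Type:             SecurityScheme_TYPE_OAUTH2,
+//		Flow:             SecurityScheme_FLOW_ACCESS_CODE,
+//		AuthorizationUrl: "https://example.com/oauth/authorize",
+//		TokenUrl:         "https://example.com/oauth/token",
+//		Scopes:           scopes,
+//	}.Build()
+//	defs := SecurityDefinitions_builder{
+//		Security: map[string]*SecurityScheme{"OAuth2": scheme},
+//	}.Build()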
+
+// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPIv2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Alternative parameter name when used as path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto generated path parameter names
+	// for overlapping paths.
+	PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+	*x = JSONSchema_FieldConfiguration{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+	if x != nil {
+		return x.PathParamName
+	}
+	return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+	x.PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Alternative parameter name when used as path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto generated path parameter names
+	// for overlapping paths.
+	PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+	m0 := &JSONSchema_FieldConfiguration{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.PathParamName = b.PathParamName
+	return m0
+}
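+
+// A minimal usage sketch, assuming only the builder types declared above:
+// attaching a FieldConfiguration to a JSONSchema so that the field, when used
+// as a path parameter, is named "parent_id" instead of an auto-generated
+// name. The parameter name and description are illustrative only.
+//
+//	fieldSchema := JSONSchema_builder{
+//		Description: "The unique identifier of the parent resource.",
+//		FieldConfiguration: JSONSchema_FieldConfiguration_builder{
+//			PathParamName: "parent_id",
+//		}.Build(),
+//	}.Build()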
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+	state         protoimpl.MessageState `protogen:"hybrid.v1"`
+	Scope         []string               `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+	*x = SecurityRequirement_SecurityRequirementValue{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+	if x != nil {
+		return x.Scope
+	}
+	return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+	x.Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	Scope []string
+}
+
+func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue {
+	m0 := &SecurityRequirement_SecurityRequirementValue{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Scope = b.Scope
+	return m0
+}
+
+var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{
+	0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+	0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67,
+	0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a,
+	0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e,
+	0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70,
+	0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50,
+	0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73,
+	0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+	0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+	0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+	0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70,
+	0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+	0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63,
+	0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69,
+	0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+	0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+	0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+	0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d,
+	0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+	0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+	0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07,
+	0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+	0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+	0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65,
+	0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+	0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f,
+	0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+	0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+	0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+	0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20,
+	0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a,
+	0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+	0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
+	0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63,
+	0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+	0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a,
+	0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52,
+	0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+	0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+	0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
+	0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+	0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+	0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+	0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+	0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48,
+	0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12,
+	0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48,
+	0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54,
+	0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+	0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+	0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a,
+	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+	0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a,
+	0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+	0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45,
+	0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+	0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+	0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66,
+	0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08,
+	0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a,
+	0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10,
+	0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08,
+	0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a,
+	0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73,
+	0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+	0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+	0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+	0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+	0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+	0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72,
+	0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76,
+	0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63,
+	0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c,
+	0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12,
+	0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45,
+	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
+	0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+	0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65,
+	0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74,
+	0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a,
+	0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+	0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+	0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d,
+	0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+	0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08,
+	0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14,
+	0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+	0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+	0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+	0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a,
+	0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+	0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+	0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18,
+	0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10,
+	0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66,
+	0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7,
+	0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a,
+	0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+	0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+	0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
+	0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+	0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+	0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18,
+	0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74,
+	0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d,
+	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78,
+	0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69,
+	0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+	0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+	0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+	0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28,
+	0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78,
+	0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+	0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+	0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c,
+	0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78,
+	0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65,
+	0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c,
+	0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+	0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12,
+	0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01,
+	0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+	0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+	0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69,
+	0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+	0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+	0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18,
+	0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+	0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+	0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
+	0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18,
+	0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70,
+	0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a,
+	0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20,
+	0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+	0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+	0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74,
+	0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
+	0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
+	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59,
+	0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12,
+	0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+	0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+	0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a,
+	0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+	0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+	0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04,
+	0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e,
+	0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a,
+	0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67,
+	0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+	0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61,
+	0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+	0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+	0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+	0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+	0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
+	0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+	0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+	0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67,
+	0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff,
+	0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+	0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69,
+	0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f,
+	0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a,
+	0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+	0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+	0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+	0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+	0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65,
+	0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+	0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+	0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+	0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c,
+	0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e,
+	0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10,
+	0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02,
+	0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10,
+	0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e,
+	0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55,
+	0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44,
+	0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c,
+	0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11,
+	0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+	0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
+	0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50,
+	0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c,
+	0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04,
+	0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71,
+	0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63,
+	0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e,
+	0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75,
+	0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+	0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+	0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+	0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+	0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63,
+	0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70,
+	0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+	0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07,
+	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54,
+	0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06,
+	0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42,
+	0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72,
+	0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70,
+	0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33,
+}
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{
+	(Scheme)(0),                           // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	(HeaderParameter_Type)(0),             // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+	(JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+	(SecurityScheme_Type)(0),              // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+	(SecurityScheme_In)(0),                // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+	(SecurityScheme_Flow)(0),              // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+	(*Swagger)(nil),                       // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	(*Operation)(nil),                     // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+	(*Parameters)(nil),                    // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters
+	(*HeaderParameter)(nil),               // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+	(*Header)(nil),                        // 10: grpc.gateway.protoc_gen_openapiv2.options.Header
+	(*Response)(nil),                      // 11: grpc.gateway.protoc_gen_openapiv2.options.Response
+	(*Info)(nil),                          // 12: grpc.gateway.protoc_gen_openapiv2.options.Info
+	(*Contact)(nil),                       // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact
+	(*License)(nil),                       // 14: grpc.gateway.protoc_gen_openapiv2.options.License
+	(*ExternalDocumentation)(nil),         // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	(*Schema)(nil),                        // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema
+	(*EnumSchema)(nil),                    // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	(*JSONSchema)(nil),                    // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	(*Tag)(nil),                           // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag
+	(*SecurityDefinitions)(nil),           // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+	(*SecurityScheme)(nil),                // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+	(*SecurityRequirement)(nil),           // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	(*Scopes)(nil),                        // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes
+	nil,                                   // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+	nil,                                   // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+	nil,                                   // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+	nil,                                   // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+	nil,                                   // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+	nil,                                   // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+	nil,                                   // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+	nil,                                   // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+	nil,                                   // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+	(*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+	nil,                                   // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+	nil,                                   // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+	nil,                                   // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+	nil,                                   // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+	(*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+	nil,                    // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+	nil,                    // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+	(*structpb.Value)(nil), // 41: google.protobuf.Value
+}
+var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{
+	12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info
+	0,  // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+	20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+	22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+	15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+	15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+	0,  // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+	8,  // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters
+	9,  // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+	1,  // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+	16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+	28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+	29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+	30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+	13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact
+	14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License
+	31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+	18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+	2,  // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+	33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+	34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+	15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+	36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+	3,  // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+	4,  // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+	5,  // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+	23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes
+	37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+	39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+	40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+	11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+	41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+	41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header
+	41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+	41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+	53, // [53:53] is the sub-list for method output_type
+	53, // [53:53] is the sub-list for method input_type
+	53, // [53:53] is the sub-list for extension type_name
+	53, // [53:53] is the sub-list for extension extendee
+	0,  // [0:53] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() }
+func file_protoc_gen_openapiv2_options_openapiv2_proto_init() {
+	if File_protoc_gen_openapiv2_options_openapiv2_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc,
+			NumEnums:      6,
+			NumMessages:   35,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes,
+		DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs,
+		EnumInfos:         file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes,
+		MessageInfos:      file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes,
+	}.Build()
+	File_protoc_gen_openapiv2_options_openapiv2_proto = out.File
+	file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil
+	file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil
+	file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto
new file mode 100644
index 0000000..5313f08
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto
@@ -0,0 +1,759 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_openapiv2.options;
+
+import "google/protobuf/struct.proto";
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options";
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+enum Scheme {
+  UNKNOWN = 0;
+  HTTP = 1;
+  HTTPS = 2;
+  WS = 3;
+  WSS = 4;
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      title: "Echo API";
+//      version: "1.0";
+//      description: "";
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//    };
+//    schemes: HTTPS;
+//    consumes: "application/json";
+//    produces: "application/json";
+//  };
+//
+message Swagger {
+  // Specifies the OpenAPI Specification version being used. It can be
+  // used by the OpenAPI UI and other clients to interpret the API listing. The
+  // value MUST be "2.0".
+  string swagger = 1;
+  // Provides metadata about the API. The metadata can be used by the
+  // clients if needed.
+  Info info = 2;
+  // The host (name or IP) serving the API. This MUST be the host only and does
+  // not include the scheme nor sub-paths. It MAY include a port. If the host is
+  // not included, the host serving the documentation is to be used (including
+  // the port). The host does not support path templating.
+  string host = 3;
+  // The base path on which the API is served, which is relative to the host. If
+  // it is not included, the API is served directly under the host. The value
+  // MUST start with a leading slash (/). The basePath does not support path
+  // templating.
+  // Note that using `base_path` does not change the endpoint paths that are
+  // generated in the resulting OpenAPI file. If you wish to use `base_path`
+  // with relatively generated OpenAPI paths, the `base_path` prefix must be
+  // manually removed from your `google.api.http` paths and your code changed to
+  // serve the API from the `base_path`.
+  string base_path = 4;
+  // The transfer protocol of the API. Values MUST be from the list: "http",
+  // "https", "ws", "wss". If the schemes is not included, the default scheme to
+  // be used is the one used to access the OpenAPI definition itself.
+  repeated Scheme schemes = 5;
+  // A list of MIME types the APIs can consume. This is global to all APIs but
+  // can be overridden on specific API calls. Value MUST be as described under
+  // Mime Types.
+  repeated string consumes = 6;
+  // A list of MIME types the APIs can produce. This is global to all APIs but
+  // can be overridden on specific API calls. Value MUST be as described under
+  // Mime Types.
+  repeated string produces = 7;
+  // field 8 is reserved for 'paths'.
+  reserved 8;
+  // field 9 is reserved for 'definitions', which at this time are already
+  // exposed as, and customizable as, proto messages.
+  reserved 9;
+  // An object to hold responses that can be used across operations. This
+  // property does not define global responses for all operations.
+  map<string, Response> responses = 10;
+  // Security scheme definitions that can be used across the specification.
+  SecurityDefinitions security_definitions = 11;
+  // A declaration of which security schemes are applied for the API as a whole.
+  // The list of values describes alternative security schemes that can be used
+  // (that is, there is a logical OR between the security requirements).
+  // Individual operations can override this definition.
+  repeated SecurityRequirement security = 12;
+  // A list of tags for API documentation control. Tags can be used for logical
+  // grouping of operations by resources or any other qualifier.
+  repeated Tag tags = 13;
+  // Additional external documentation.
+  ExternalDocumentation external_docs = 14;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 15;
+}
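+
+// A further illustrative example (the host, base path, and 404 response below
+// are hypothetical values, not defaults): fields such as `host`, `base_path`,
+// and the per-status `responses` map can be set through the same option:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    host: "api.example.com";
+//    base_path: "/v1";
+//    responses: {
+//      key: "404"
+//      value: {
+//        description: "Returned when the resource does not exist.";
+//      }
+//    }
+//  };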
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+//  service EchoService {
+//    rpc Echo(SimpleMessage) returns (SimpleMessage) {
+//      option (google.api.http) = {
+//        get: "/v1/example/echo/{id}"
+//      };
+//
+//      option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//        summary: "Get a message.";
+//        operation_id: "getMessage";
+//        tags: "echo";
+//        responses: {
+//          key: "200"
+//          value: {
+//            description: "OK";
+//          }
+//        }
+//      };
+//    }
+//  }
+message Operation {
+  // A list of tags for API documentation control. Tags can be used for logical
+  // grouping of operations by resources or any other qualifier.
+  repeated string tags = 1;
+  // A short summary of what the operation does. For maximum readability in the
+  // swagger-ui, this field SHOULD be less than 120 characters.
+  string summary = 2;
+  // A verbose explanation of the operation behavior. GFM syntax can be used for
+  // rich text representation.
+  string description = 3;
+  // Additional external documentation for this operation.
+  ExternalDocumentation external_docs = 4;
+  // Unique string used to identify the operation. The id MUST be unique among
+  // all operations described in the API. Tools and libraries MAY use the
+  // operationId to uniquely identify an operation, therefore, it is recommended
+  // to follow common programming naming conventions.
+  string operation_id = 5;
+  // A list of MIME types the operation can consume. This overrides the consumes
+  // definition at the OpenAPI Object. An empty value MAY be used to clear the
+  // global definition. Value MUST be as described under Mime Types.
+  repeated string consumes = 6;
+  // A list of MIME types the operation can produce. This overrides the produces
+  // definition at the OpenAPI Object. An empty value MAY be used to clear the
+  // global definition. Value MUST be as described under Mime Types.
+  repeated string produces = 7;
+  // field 8 is reserved for 'parameters'.
+  reserved 8;
+  // The list of possible responses as they are returned from executing this
+  // operation.
+  map<string, Response> responses = 9;
+  // The transfer protocol for the operation. Values MUST be from the list:
+  // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+  // schemes definition.
+  repeated Scheme schemes = 10;
+  // Declares this operation to be deprecated. Consumers SHOULD refrain from
+  // using the declared operation. Default value is false.
+  bool deprecated = 11;
+  // A declaration of which security schemes are applied for this operation. The
+  // list of values describes alternative security schemes that can be used
+  // (that is, there is a logical OR between the security requirements). This
+  // definition overrides any declared top-level security. To remove a top-level
+  // security declaration, an empty array can be used.
+  repeated SecurityRequirement security = 12;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 13;
+  // Custom parameters such as HTTP request headers.
+  // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+  // and https://swagger.io/specification/v2/#parameter-object.
+  Parameters parameters = 14;
+}
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
+// allow header parameters to be set here since we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+message Parameters {
+  // `Headers` is one or more HTTP header parameter.
+  // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+  repeated HeaderParameter headers = 1;
+}
+
+// `HeaderParameter` represents an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+message HeaderParameter {
+  // `Type` is a supported HTTP header type.
+  // See https://swagger.io/specification/v2/#parameterType.
+  enum Type {
+    UNKNOWN = 0;
+    STRING = 1;
+    NUMBER = 2;
+    INTEGER = 3;
+    BOOLEAN = 4;
+  }
+
+  // `Name` is the header name.
+  string name = 1;
+  // `Description` is a short description of the header.
+  string description = 2;
+  // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+  // See: https://swagger.io/specification/v2/#parameterType.
+  Type type = 3;
+  // `Format` The extending format for the previously mentioned type.
+  string format = 4;
+  // `Required` indicates whether the header is required.
+  bool required = 5;
+  // field 6 is reserved for 'items', but in OpenAPI-specific way.
+  reserved 6;
+  // field 7 is reserved for `Collection Format`, which determines the format of the array if type array is used.
+  reserved 7;
+}
+
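+// The following is an illustrative sketch, not part of the upstream comments:
+// it shows how `Parameters` and `HeaderParameter` could be attached to a
+// method through the `openapiv2_operation` option defined by this package.
+// The header name "X-Request-Id" is an arbitrary placeholder.
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//    parameters: {
+//      headers: {
+//        name: "X-Request-Id";
+//        description: "Optional request correlation id.";
+//        type: STRING;
+//        required: false;
+//      };
+//    };
+//  };
+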
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+//
+message Header {
+  // `Description` is a short description of the header.
+  string description = 1;
+  // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+  string type = 2;
+  // `Format` The extending format for the previously mentioned type.
+  string format = 3;
+  // field 4 is reserved for 'items', but in OpenAPI-specific way.
+  reserved 4;
+  // field 5 is reserved for `Collection Format`, which determines the format of the array if type array is used.
+  reserved 5;
+  // `Default` Declares the value of the header that the server will use if none is provided.
+  // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+  // Unlike JSON Schema this value MUST conform to the defined type for the header.
+  string default = 6;
+  // field 7 is reserved for 'maximum'.
+  reserved 7;
+  // field 8 is reserved for 'exclusiveMaximum'.
+  reserved 8;
+  // field 9 is reserved for 'minimum'.
+  reserved 9;
+  // field 10 is reserved for 'exclusiveMinimum'.
+  reserved 10;
+  // field 11 is reserved for 'maxLength'.
+  reserved 11;
+  // field 12 is reserved for 'minLength'.
+  reserved 12;
+  // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+  string pattern = 13;
+  // field 14 is reserved for 'maxItems'.
+  reserved 14;
+  // field 15 is reserved for 'minItems'.
+  reserved 15;
+  // field 16 is reserved for 'uniqueItems'.
+  reserved 16;
+  // field 17 is reserved for 'enum'.
+  reserved 17;
+  // field 18 is reserved for 'multipleOf'.
+  reserved 18;
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+//
+message Response {
+  // `Description` is a short description of the response.
+  // GFM syntax can be used for rich text representation.
+  string description = 1;
+  // `Schema` optionally defines the structure of the response.
+  // If `Schema` is not provided, it means there is no content to the response.
+  Schema schema = 2;
+  // `Headers` A list of headers that are sent with the response.
+  // `Header` name is expected to be a string in the canonical format of the MIME header key
+  // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+  map<string, Header> headers = 3;
+  // `Examples` gives per-mimetype response examples.
+  // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+  map<string, string> examples = 4;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 5;
+}
+
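+// An illustrative sketch, not upstream documentation: a `Response` entry with
+// a header and a per-mimetype example, as it could appear inside the
+// `responses` map of an `openapiv2_operation` option. The status code, header
+// name and JSON payload are placeholders.
+//
+//  responses: {
+//    key: "404"
+//    value: {
+//      description: "Returned when the resource does not exist.";
+//      headers: {
+//        key: "X-Correlation-Id"
+//        value: {
+//          description: "Request correlation id.";
+//          type: "string";
+//        }
+//      }
+//      examples: {
+//        key: "application/json"
+//        value: "{\"error\": \"not found\"}"
+//      }
+//    }
+//  }
+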
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      title: "Echo API";
+//      version: "1.0";
+//      description: "";
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//    };
+//    ...
+//  };
+//
+message Info {
+  // The title of the application.
+  string title = 1;
+  // A short description of the application. GFM syntax can be used for rich
+  // text representation.
+  string description = 2;
+  // The Terms of Service for the API.
+  string terms_of_service = 3;
+  // The contact information for the exposed API.
+  Contact contact = 4;
+  // The license information for the exposed API.
+  License license = 5;
+  // Provides the version of the application API (not to be confused
+  // with the specification version).
+  string version = 6;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 7;
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      ...
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      ...
+//    };
+//    ...
+//  };
+//
+message Contact {
+  // The identifying name of the contact person/organization.
+  string name = 1;
+  // The URL pointing to the contact information. MUST be in the format of a
+  // URL.
+  string url = 2;
+  // The email address of the contact person/organization. MUST be in the format
+  // of an email address.
+  string email = 3;
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      ...
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//      ...
+//    };
+//    ...
+//  };
+//
+message License {
+  // The license name used for the API.
+  string name = 1;
+  // A URL to the license used for the API. MUST be in the format of a URL.
+  string url = 2;
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    ...
+//    external_docs: {
+//      description: "More about gRPC-Gateway";
+//      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//    }
+//    ...
+//  };
+//
+message ExternalDocumentation {
+  // A short description of the target documentation. GFM syntax can be used for
+  // rich text representation.
+  string description = 1;
+  // The URL for the target documentation. Value MUST be in the format
+  // of a URL.
+  string url = 2;
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+message Schema {
+  JSONSchema json_schema = 1;
+  // Adds support for polymorphism. The discriminator is the schema property
+  // name that is used to differentiate between other schema that inherit this
+  // schema. The property name used MUST be defined at this schema and it MUST
+  // be in the required property list. When used, the value MUST be the name of
+  // this schema or any schema that inherits it.
+  string discriminator = 2;
+  // Relevant only for Schema "properties" definitions. Declares the property as
+  // "read only". This means that it MAY be sent as part of a response but MUST
+  // NOT be sent as part of the request. Properties marked as readOnly being
+  // true SHOULD NOT be in the required list of the defined schema. Default
+  // value is false.
+  bool read_only = 3;
+  // field 4 is reserved for 'xml'.
+  reserved 4;
+  // Additional external documentation for this schema.
+  ExternalDocumentation external_docs = 5;
+  // A free-form property to include an example of an instance for this schema in JSON.
+  // This is copied verbatim to the output.
+  string example = 6;
+}
+
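+// An illustrative sketch, not upstream documentation: a `Schema` option on a
+// message, combining `json_schema` with a verbatim JSON `example`. The `Pet`
+// message and its field are placeholders.
+//
+//  message Pet {
+//    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//      json_schema: {
+//        title: "Pet"
+//        description: "A pet in the store."
+//        required: ["name"]
+//      }
+//      example: "{\"name\": \"Rex\"}"
+//    };
+//
+//    string name = 1;
+//  }
+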
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to Enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+//    ...
+//    title: "MyEnum";
+//    description:"This is my nice enum";
+//    example: "ZERO";
+//    required: true;
+//    ...
+//  };
+//
+message EnumSchema {
+  // A short description of the schema.
+  string description = 1;
+  string default = 2;
+  // The title of the schema.
+  string title = 3;
+  bool required = 4;
+  bool read_only = 5;
+  // Additional external documentation for this schema.
+  ExternalDocumentation external_docs = 6;
+  string example = 7;
+  // Ref is used to define an external reference to include in the message.
+  // This could be a fully qualified proto message reference, and that type must
+  // be imported into the protofile. If no message is identified, the Ref will
+  // be used verbatim in the output.
+  // For example:
+  //  `ref: ".google.protobuf.Timestamp"`.
+  string ref = 8;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `JSONSchema` represents properties taken from JSON Schema, as used in the
+// OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//  message SimpleMessage {
+//    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//      json_schema: {
+//        title: "SimpleMessage"
+//        description: "A simple message."
+//        required: ["id"]
+//      }
+//    };
+//
+//    // Id represents the message identifier.
+//    string id = 1; [
+//        (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//          description: "The unique identifier of the simple message."
+//        }];
+//  }
+//
+message JSONSchema {
+  // field 1 is reserved for '$id', omitted from OpenAPI v2.
+  reserved 1;
+  // field 2 is reserved for '$schema', omitted from OpenAPI v2.
+  reserved 2;
+  // Ref is used to define an external reference to include in the message.
+  // This could be a fully qualified proto message reference, and that type must
+  // be imported into the protofile. If no message is identified, the Ref will
+  // be used verbatim in the output.
+  // For example:
+  //  `ref: ".google.protobuf.Timestamp"`.
+  string ref = 3;
+  // field 4 is reserved for '$comment', omitted from OpenAPI v2.
+  reserved 4;
+  // The title of the schema.
+  string title = 5;
+  // A short description of the schema.
+  string description = 6;
+  string default = 7;
+  bool read_only = 8;
+  // A free-form property to include a JSON example of this field. This is copied
+  // verbatim to the output swagger.json. Quotes must be escaped.
+  // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject  https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+  string example = 9;
+  double multiple_of = 10;
+  // Maximum represents an inclusive upper limit for a numeric instance. The
+  // value MUST be a number.
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  // Minimum represents an inclusive lower limit for a numeric instance. The
+  // value MUST be a number.
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  uint64 max_length = 15;
+  uint64 min_length = 16;
+  string pattern = 17;
+  // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2.
+  reserved 18;
+  // field 19 is reserved for 'items', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'items'?
+  reserved 19;
+  uint64 max_items = 20;
+  uint64 min_items = 21;
+  bool unique_items = 22;
+  // field 23 is reserved for 'contains', omitted from OpenAPI v2.
+  reserved 23;
+  uint64 max_properties = 24;
+  uint64 min_properties = 25;
+  repeated string required = 26;
+  // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific
+  // way. TODO(ivucica): add 'additionalProperties'?
+  reserved 27;
+  // field 28 is reserved for 'definitions', omitted from OpenAPI v2.
+  reserved 28;
+  // field 29 is reserved for 'properties', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'properties'?
+  reserved 29;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2:
+  // patternProperties, dependencies, propertyNames, const
+  reserved 30 to 33;
+  // Items in 'array' must be unique.
+  repeated string array = 34;
+
+  enum JSONSchemaSimpleTypes {
+    UNKNOWN = 0;
+    ARRAY = 1;
+    BOOLEAN = 2;
+    INTEGER = 3;
+    NULL = 4;
+    NUMBER = 5;
+    OBJECT = 6;
+    STRING = 7;
+  }
+
+  repeated JSONSchemaSimpleTypes type = 35;
+  // `Format` is the extending format for the previously mentioned type.
+  string format = 36;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2: contentMediaType, contentEncoding, if, then, else
+  reserved 37 to 41;
+  // field 42 is reserved for 'allOf', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'allOf'?
+  reserved 42;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2:
+  // anyOf, oneOf, not
+  reserved 43 to 45;
+  // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+  repeated string enum = 46;
+
+  // Additional field level properties used when generating the OpenAPI v2 file.
+  FieldConfiguration field_configuration = 1001;
+
+  // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
+  // These properties are not defined by OpenAPIv2, but they are used to control the generation.
+  message FieldConfiguration {
+    // Alternative parameter name when used as path parameter. If set, this will
+    // be used as the complete parameter name when this field is used as a path
+    // parameter. Use this to avoid having auto generated path parameter names
+    // for overlapping paths.
+    string path_param_name = 47;
+  }
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 48;
+}
+
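+// An illustrative sketch, not upstream documentation: using the
+// `field_configuration` message above to override the generated path
+// parameter name for a field. The field and parameter names are placeholders.
+//
+//  string book_id = 1 [
+//      (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//        field_configuration: {path_param_name: "bookId"}
+//      }];
+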
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+//
+message Tag {
+  // The name of the tag. Use it to allow override of the name of a
+  // global Tag object, then use that name to reference the tag throughout the
+  // OpenAPI file.
+  string name = 1;
+  // A short description for the tag. GFM syntax can be used for rich text
+  // representation.
+  string description = 2;
+  // Additional external documentation for this tag.
+  ExternalDocumentation external_docs = 3;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 4;
+}
+
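+// An illustrative sketch, not upstream documentation: declaring a global `Tag`
+// inside the `openapiv2_swagger` option so operations can reference it by
+// name. The tag name and description are placeholders.
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    tags: {
+//      name: "echo";
+//      description: "Echo service operations.";
+//    };
+//  };
+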
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+message SecurityDefinitions {
+  // A single security scheme definition, mapping a "name" to the scheme it
+  // defines.
+  map<string, SecurityScheme> security = 1;
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+message SecurityScheme {
+  // The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  enum Type {
+    TYPE_INVALID = 0;
+    TYPE_BASIC = 1;
+    TYPE_API_KEY = 2;
+    TYPE_OAUTH2 = 3;
+  }
+
+  // The location of the API key. Valid values are "query" or "header".
+  enum In {
+    IN_INVALID = 0;
+    IN_QUERY = 1;
+    IN_HEADER = 2;
+  }
+
+  // The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  enum Flow {
+    FLOW_INVALID = 0;
+    FLOW_IMPLICIT = 1;
+    FLOW_PASSWORD = 2;
+    FLOW_APPLICATION = 3;
+    FLOW_ACCESS_CODE = 4;
+  }
+
+  // The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  Type type = 1;
+  // A short description of the security scheme.
+  string description = 2;
+  // The name of the header or query parameter to be used.
+  // Valid for apiKey.
+  string name = 3;
+  // The location of the API key. Valid values are "query" or
+  // "header".
+  // Valid for apiKey.
+  In in = 4;
+  // The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  // Valid for oauth2.
+  Flow flow = 5;
+  // The authorization URL to be used for this flow. This SHOULD be in
+  // the form of a URL.
+  // Valid for oauth2/implicit and oauth2/accessCode.
+  string authorization_url = 6;
+  // The token URL to be used for this flow. This SHOULD be in the
+  // form of a URL.
+  // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+  string token_url = 7;
+  // The available scopes for the OAuth2 security scheme.
+  // Valid for oauth2.
+  Scopes scopes = 8;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+message SecurityRequirement {
+  // If the security scheme is of type "oauth2", then the value is a list of
+  // scope names required for the execution. For other security scheme types,
+  // the array MUST be empty.
+  message SecurityRequirementValue {
+    repeated string scope = 1;
+  }
+  // Each name must correspond to a security scheme which is declared in
+  // the Security Definitions. If the security scheme is of type "oauth2",
+  // then the value is a list of scope names required for the execution.
+  // For other security scheme types, the array MUST be empty.
+  map<string, SecurityRequirementValue> security_requirement = 1;
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+message Scopes {
+  // Maps the name of a scope to a short description of it (as the value of
+  // the property).
+  map<string, string> scope = 1;
+}
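+
+// An illustrative sketch, not upstream documentation, tying the security
+// messages above together: an API-key scheme is declared under
+// `security_definitions` and then required API-wide via `security`. The
+// scheme name "ApiKeyAuth" and header name "X-API-Key" are placeholders.
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    security_definitions: {
+//      security: {
+//        key: "ApiKeyAuth"
+//        value: {
+//          type: TYPE_API_KEY;
+//          in: IN_HEADER;
+//          name: "X-API-Key";
+//        }
+//      }
+//    }
+//    security: {
+//      security_requirement: {
+//        key: "ApiKeyAuth"
+//        value: {}
+//      }
+//    }
+//  };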
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
new file mode 100644
index 0000000..1f0e0c2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
@@ -0,0 +1,4055 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+	Scheme_UNKNOWN Scheme = 0
+	Scheme_HTTP    Scheme = 1
+	Scheme_HTTPS   Scheme = 2
+	Scheme_WS      Scheme = 3
+	Scheme_WSS     Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+	Scheme_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "HTTP",
+		2: "HTTPS",
+		3: "WS",
+		4: "WSS",
+	}
+	Scheme_value = map[string]int32{
+		"UNKNOWN": 0,
+		"HTTP":    1,
+		"HTTPS":   2,
+		"WS":      3,
+		"WSS":     4,
+	}
+)
+
+func (x Scheme) Enum() *Scheme {
+	p := new(Scheme)
+	*p = x
+	return p
+}
+
+func (x Scheme) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+	HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+	HeaderParameter_STRING  HeaderParameter_Type = 1
+	HeaderParameter_NUMBER  HeaderParameter_Type = 2
+	HeaderParameter_INTEGER HeaderParameter_Type = 3
+	HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
+
+// Enum value maps for HeaderParameter_Type.
+var (
+	HeaderParameter_Type_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "STRING",
+		2: "NUMBER",
+		3: "INTEGER",
+		4: "BOOLEAN",
+	}
+	HeaderParameter_Type_value = map[string]int32{
+		"UNKNOWN": 0,
+		"STRING":  1,
+		"NUMBER":  2,
+		"INTEGER": 3,
+		"BOOLEAN": 4,
+	}
+)
+
+func (x HeaderParameter_Type) Enum() *HeaderParameter_Type {
+	p := new(HeaderParameter_Type)
+	*p = x
+	return p
+}
+
+func (x HeaderParameter_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderParameter_Type) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1]
+}
+
+func (x HeaderParameter_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+	JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+	JSONSchema_ARRAY   JSONSchema_JSONSchemaSimpleTypes = 1
+	JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+	JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+	JSONSchema_NULL    JSONSchema_JSONSchemaSimpleTypes = 4
+	JSONSchema_NUMBER  JSONSchema_JSONSchemaSimpleTypes = 5
+	JSONSchema_OBJECT  JSONSchema_JSONSchemaSimpleTypes = 6
+	JSONSchema_STRING  JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+// Enum value maps for JSONSchema_JSONSchemaSimpleTypes.
+var (
+	JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "ARRAY",
+		2: "BOOLEAN",
+		3: "INTEGER",
+		4: "NULL",
+		5: "NUMBER",
+		6: "OBJECT",
+		7: "STRING",
+	}
+	JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+		"UNKNOWN": 0,
+		"ARRAY":   1,
+		"BOOLEAN": 2,
+		"INTEGER": 3,
+		"NULL":    4,
+		"NUMBER":  5,
+		"OBJECT":  6,
+		"STRING":  7,
+	}
+)
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes {
+	p := new(JSONSchema_JSONSchemaSimpleTypes)
+	*p = x
+	return p
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor()
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2]
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+	SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+	SecurityScheme_TYPE_BASIC   SecurityScheme_Type = 1
+	SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+	SecurityScheme_TYPE_OAUTH2  SecurityScheme_Type = 3
+)
+
+// Enum value maps for SecurityScheme_Type.
+var (
+	SecurityScheme_Type_name = map[int32]string{
+		0: "TYPE_INVALID",
+		1: "TYPE_BASIC",
+		2: "TYPE_API_KEY",
+		3: "TYPE_OAUTH2",
+	}
+	SecurityScheme_Type_value = map[string]int32{
+		"TYPE_INVALID": 0,
+		"TYPE_BASIC":   1,
+		"TYPE_API_KEY": 2,
+		"TYPE_OAUTH2":  3,
+	}
+)
+
+func (x SecurityScheme_Type) Enum() *SecurityScheme_Type {
+	p := new(SecurityScheme_Type)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor()
+}
+
+func (SecurityScheme_Type) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3]
+}
+
+func (x SecurityScheme_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+	SecurityScheme_IN_INVALID SecurityScheme_In = 0
+	SecurityScheme_IN_QUERY   SecurityScheme_In = 1
+	SecurityScheme_IN_HEADER  SecurityScheme_In = 2
+)
+
+// Enum value maps for SecurityScheme_In.
+var (
+	SecurityScheme_In_name = map[int32]string{
+		0: "IN_INVALID",
+		1: "IN_QUERY",
+		2: "IN_HEADER",
+	}
+	SecurityScheme_In_value = map[string]int32{
+		"IN_INVALID": 0,
+		"IN_QUERY":   1,
+		"IN_HEADER":  2,
+	}
+)
+
+func (x SecurityScheme_In) Enum() *SecurityScheme_In {
+	p := new(SecurityScheme_In)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_In) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor()
+}
+
+func (SecurityScheme_In) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4]
+}
+
+func (x SecurityScheme_In) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+	SecurityScheme_FLOW_INVALID     SecurityScheme_Flow = 0
+	SecurityScheme_FLOW_IMPLICIT    SecurityScheme_Flow = 1
+	SecurityScheme_FLOW_PASSWORD    SecurityScheme_Flow = 2
+	SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+	SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+// Enum value maps for SecurityScheme_Flow.
+var (
+	SecurityScheme_Flow_name = map[int32]string{
+		0: "FLOW_INVALID",
+		1: "FLOW_IMPLICIT",
+		2: "FLOW_PASSWORD",
+		3: "FLOW_APPLICATION",
+		4: "FLOW_ACCESS_CODE",
+	}
+	SecurityScheme_Flow_value = map[string]int32{
+		"FLOW_INVALID":     0,
+		"FLOW_IMPLICIT":    1,
+		"FLOW_PASSWORD":    2,
+		"FLOW_APPLICATION": 3,
+		"FLOW_ACCESS_CODE": 4,
+	}
+)
+
+func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow {
+	p := new(SecurityScheme_Flow)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_Flow) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor()
+}
+
+func (SecurityScheme_Flow) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5]
+}
+
+func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    title: "Echo API";
+//	    version: "1.0";
+//	    description: "";
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	  };
+//	  schemes: HTTPS;
+//	  consumes: "application/json";
+//	  produces: "application/json";
+//	};
+type Swagger struct {
+	state                          protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Swagger             string                     `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+	xxx_hidden_Info                *Info                      `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+	xxx_hidden_Host                string                     `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+	xxx_hidden_BasePath            string                     `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+	xxx_hidden_Schemes             []Scheme                   `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+	xxx_hidden_Consumes            []string                   `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	xxx_hidden_Produces            []string                   `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	xxx_hidden_Responses           map[string]*Response       `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_SecurityDefinitions *SecurityDefinitions       `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+	xxx_hidden_Security            *[]*SecurityRequirement    `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	xxx_hidden_Tags                *[]*Tag                    `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"`
+	xxx_hidden_ExternalDocs        *ExternalDocumentation     `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_Extensions          map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields                  protoimpl.UnknownFields
+	sizeCache                      protoimpl.SizeCache
+}
+
+func (x *Swagger) Reset() {
+	*x = Swagger{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Swagger) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Swagger) ProtoMessage() {}
+
+func (x *Swagger) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Swagger) GetSwagger() string {
+	if x != nil {
+		return x.xxx_hidden_Swagger
+	}
+	return ""
+}
+
+func (x *Swagger) GetInfo() *Info {
+	if x != nil {
+		return x.xxx_hidden_Info
+	}
+	return nil
+}
+
+func (x *Swagger) GetHost() string {
+	if x != nil {
+		return x.xxx_hidden_Host
+	}
+	return ""
+}
+
+func (x *Swagger) GetBasePath() string {
+	if x != nil {
+		return x.xxx_hidden_BasePath
+	}
+	return ""
+}
+
+func (x *Swagger) GetSchemes() []Scheme {
+	if x != nil {
+		return x.xxx_hidden_Schemes
+	}
+	return nil
+}
+
+func (x *Swagger) GetConsumes() []string {
+	if x != nil {
+		return x.xxx_hidden_Consumes
+	}
+	return nil
+}
+
+func (x *Swagger) GetProduces() []string {
+	if x != nil {
+		return x.xxx_hidden_Produces
+	}
+	return nil
+}
+
+func (x *Swagger) GetResponses() map[string]*Response {
+	if x != nil {
+		return x.xxx_hidden_Responses
+	}
+	return nil
+}
+
+func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+	if x != nil {
+		return x.xxx_hidden_SecurityDefinitions
+	}
+	return nil
+}
+
+func (x *Swagger) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		if x.xxx_hidden_Security != nil {
+			return *x.xxx_hidden_Security
+		}
+	}
+	return nil
+}
+
+func (x *Swagger) GetTags() []*Tag {
+	if x != nil {
+		if x.xxx_hidden_Tags != nil {
+			return *x.xxx_hidden_Tags
+		}
+	}
+	return nil
+}
+
+func (x *Swagger) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *Swagger) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Swagger) SetSwagger(v string) {
+	x.xxx_hidden_Swagger = v
+}
+
+func (x *Swagger) SetInfo(v *Info) {
+	x.xxx_hidden_Info = v
+}
+
+func (x *Swagger) SetHost(v string) {
+	x.xxx_hidden_Host = v
+}
+
+func (x *Swagger) SetBasePath(v string) {
+	x.xxx_hidden_BasePath = v
+}
+
+func (x *Swagger) SetSchemes(v []Scheme) {
+	x.xxx_hidden_Schemes = v
+}
+
+func (x *Swagger) SetConsumes(v []string) {
+	x.xxx_hidden_Consumes = v
+}
+
+func (x *Swagger) SetProduces(v []string) {
+	x.xxx_hidden_Produces = v
+}
+
+func (x *Swagger) SetResponses(v map[string]*Response) {
+	x.xxx_hidden_Responses = v
+}
+
+func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) {
+	x.xxx_hidden_SecurityDefinitions = v
+}
+
+func (x *Swagger) SetSecurity(v []*SecurityRequirement) {
+	x.xxx_hidden_Security = &v
+}
+
+func (x *Swagger) SetTags(v []*Tag) {
+	x.xxx_hidden_Tags = &v
+}
+
+func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Swagger) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Swagger) HasInfo() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Info != nil
+}
+
+func (x *Swagger) HasSecurityDefinitions() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_SecurityDefinitions != nil
+}
+
+func (x *Swagger) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Swagger) ClearInfo() {
+	x.xxx_hidden_Info = nil
+}
+
+func (x *Swagger) ClearSecurityDefinitions() {
+	x.xxx_hidden_SecurityDefinitions = nil
+}
+
+func (x *Swagger) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+type Swagger_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Specifies the OpenAPI Specification version being used. It can be
+	// used by the OpenAPI UI and other clients to interpret the API listing. The
+	// value MUST be "2.0".
+	Swagger string
+	// Provides metadata about the API. The metadata can be used by the
+	// clients if needed.
+	Info *Info
+	// The host (name or ip) serving the API. This MUST be the host only and does
+	// not include the scheme nor sub-paths. It MAY include a port. If the host is
+	// not included, the host serving the documentation is to be used (including
+	// the port). The host does not support path templating.
+	Host string
+	// The base path on which the API is served, which is relative to the host. If
+	// it is not included, the API is served directly under the host. The value
+	// MUST start with a leading slash (/). The basePath does not support path
+	// templating.
+	// Note that using `base_path` does not change the endpoint paths that are
+	// generated in the resulting OpenAPI file. If you wish to use `base_path`
+	// with relatively generated OpenAPI paths, the `base_path` prefix must be
+	// manually removed from your `google.api.http` paths and your code changed to
+	// serve the API from the `base_path`.
+	BasePath string
+	// The transfer protocol of the API. Values MUST be from the list: "http",
+	// "https", "ws", "wss". If the schemes is not included, the default scheme to
+	// be used is the one used to access the OpenAPI definition itself.
+	Schemes []Scheme
+	// A list of MIME types the APIs can consume. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Consumes []string
+	// A list of MIME types the APIs can produce. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Produces []string
+	// An object to hold responses that can be used across operations. This
+	// property does not define global responses for all operations.
+	Responses map[string]*Response
+	// Security scheme definitions that can be used across the specification.
+	SecurityDefinitions *SecurityDefinitions
+	// A declaration of which security schemes are applied for the API as a whole.
+	// The list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements).
+	// Individual operations can override this definition.
+	Security []*SecurityRequirement
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []*Tag
+	// Additional external documentation.
+	ExternalDocs *ExternalDocumentation
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Swagger_builder) Build() *Swagger {
+	m0 := &Swagger{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Swagger = b.Swagger
+	x.xxx_hidden_Info = b.Info
+	x.xxx_hidden_Host = b.Host
+	x.xxx_hidden_BasePath = b.BasePath
+	x.xxx_hidden_Schemes = b.Schemes
+	x.xxx_hidden_Consumes = b.Consumes
+	x.xxx_hidden_Produces = b.Produces
+	x.xxx_hidden_Responses = b.Responses
+	x.xxx_hidden_SecurityDefinitions = b.SecurityDefinitions
+	x.xxx_hidden_Security = &b.Security
+	x.xxx_hidden_Tags = &b.Tags
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
+
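+// An illustrative sketch, not generated code: constructing a Swagger value
+// through the opaque-API builder above. Under the protoopaque build tag the
+// struct fields are hidden, so reads go through the generated accessors.
+//
+//	sw := Swagger_builder{
+//		Swagger:  "2.0",
+//		Schemes:  []Scheme{Scheme_HTTPS},
+//		Consumes: []string{"application/json"},
+//	}.Build()
+//	_ = sw.GetSwagger() // "2.0"
+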
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+//	service EchoService {
+//	  rpc Echo(SimpleMessage) returns (SimpleMessage) {
+//	    option (google.api.http) = {
+//	      get: "/v1/example/echo/{id}"
+//	    };
+//
+//	    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//	      summary: "Get a message.";
+//	      operation_id: "getMessage";
+//	      tags: "echo";
+//	      responses: {
+//	        key: "200"
+//	        value: {
+//	          description: "OK";
+//	        }
+//	      }
+//	    };
+//	  }
+//	}
+type Operation struct {
+	state                   protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Tags         []string                   `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+	xxx_hidden_Summary      string                     `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+	xxx_hidden_Description  string                     `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_ExternalDocs *ExternalDocumentation     `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_OperationId  string                     `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+	xxx_hidden_Consumes     []string                   `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	xxx_hidden_Produces     []string                   `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	xxx_hidden_Responses    map[string]*Response       `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_Schemes      []Scheme                   `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+	xxx_hidden_Deprecated   bool                       `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+	xxx_hidden_Security     *[]*SecurityRequirement    `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	xxx_hidden_Extensions   map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_Parameters   *Parameters                `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
+}
+
+func (x *Operation) Reset() {
+	*x = Operation{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Operation) GetTags() []string {
+	if x != nil {
+		return x.xxx_hidden_Tags
+	}
+	return nil
+}
+
+func (x *Operation) GetSummary() string {
+	if x != nil {
+		return x.xxx_hidden_Summary
+	}
+	return ""
+}
+
+func (x *Operation) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *Operation) GetOperationId() string {
+	if x != nil {
+		return x.xxx_hidden_OperationId
+	}
+	return ""
+}
+
+func (x *Operation) GetConsumes() []string {
+	if x != nil {
+		return x.xxx_hidden_Consumes
+	}
+	return nil
+}
+
+func (x *Operation) GetProduces() []string {
+	if x != nil {
+		return x.xxx_hidden_Produces
+	}
+	return nil
+}
+
+func (x *Operation) GetResponses() map[string]*Response {
+	if x != nil {
+		return x.xxx_hidden_Responses
+	}
+	return nil
+}
+
+func (x *Operation) GetSchemes() []Scheme {
+	if x != nil {
+		return x.xxx_hidden_Schemes
+	}
+	return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+	if x != nil {
+		return x.xxx_hidden_Deprecated
+	}
+	return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		if x.xxx_hidden_Security != nil {
+			return *x.xxx_hidden_Security
+		}
+	}
+	return nil
+}
+
+func (x *Operation) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Operation) GetParameters() *Parameters {
+	if x != nil {
+		return x.xxx_hidden_Parameters
+	}
+	return nil
+}
+
+func (x *Operation) SetTags(v []string) {
+	x.xxx_hidden_Tags = v
+}
+
+func (x *Operation) SetSummary(v string) {
+	x.xxx_hidden_Summary = v
+}
+
+func (x *Operation) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Operation) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Operation) SetOperationId(v string) {
+	x.xxx_hidden_OperationId = v
+}
+
+func (x *Operation) SetConsumes(v []string) {
+	x.xxx_hidden_Consumes = v
+}
+
+func (x *Operation) SetProduces(v []string) {
+	x.xxx_hidden_Produces = v
+}
+
+func (x *Operation) SetResponses(v map[string]*Response) {
+	x.xxx_hidden_Responses = v
+}
+
+func (x *Operation) SetSchemes(v []Scheme) {
+	x.xxx_hidden_Schemes = v
+}
+
+func (x *Operation) SetDeprecated(v bool) {
+	x.xxx_hidden_Deprecated = v
+}
+
+func (x *Operation) SetSecurity(v []*SecurityRequirement) {
+	x.xxx_hidden_Security = &v
+}
+
+func (x *Operation) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Operation) SetParameters(v *Parameters) {
+	x.xxx_hidden_Parameters = v
+}
+
+func (x *Operation) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Operation) HasParameters() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Parameters != nil
+}
+
+func (x *Operation) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+func (x *Operation) ClearParameters() {
+	x.xxx_hidden_Parameters = nil
+}
+
+type Operation_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []string
+	// A short summary of what the operation does. For maximum readability in the
+	// swagger-ui, this field SHOULD be less than 120 characters.
+	Summary string
+	// A verbose explanation of the operation behavior. GFM syntax can be used for
+	// rich text representation.
+	Description string
+	// Additional external documentation for this operation.
+	ExternalDocs *ExternalDocumentation
+	// Unique string used to identify the operation. The id MUST be unique among
+	// all operations described in the API. Tools and libraries MAY use the
+	// operationId to uniquely identify an operation, therefore, it is recommended
+	// to follow common programming naming conventions.
+	OperationId string
+	// A list of MIME types the operation can consume. This overrides the consumes
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Consumes []string
+	// A list of MIME types the operation can produce. This overrides the produces
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Produces []string
+	// The list of possible responses as they are returned from executing this
+	// operation.
+	Responses map[string]*Response
+	// The transfer protocol for the operation. Values MUST be from the list:
+	// "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+	// schemes definition.
+	Schemes []Scheme
+	// Declares this operation to be deprecated. Consumers SHOULD refrain from
+	// using the declared operation. Default value is false.
+	Deprecated bool
+	// A declaration of which security schemes are applied for this operation. The
+	// list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements). This
+	// definition overrides any declared top-level security. To remove a top-level
+	// security declaration, an empty array can be used.
+	Security []*SecurityRequirement
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+	// Custom parameters such as HTTP request headers.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/
+	// and https://swagger.io/specification/v2/#parameter-object.
+	Parameters *Parameters
+}
+
+func (b0 Operation_builder) Build() *Operation {
+	m0 := &Operation{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Tags = b.Tags
+	x.xxx_hidden_Summary = b.Summary
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_OperationId = b.OperationId
+	x.xxx_hidden_Consumes = b.Consumes
+	x.xxx_hidden_Produces = b.Produces
+	x.xxx_hidden_Responses = b.Responses
+	x.xxx_hidden_Schemes = b.Schemes
+	x.xxx_hidden_Deprecated = b.Deprecated
+	x.xxx_hidden_Security = &b.Security
+	x.xxx_hidden_Extensions = b.Extensions
+	x.xxx_hidden_Parameters = b.Parameters
+	return m0
+}
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
+// allow header parameters to be set here since we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+type Parameters struct {
+	state              protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Headers *[]*HeaderParameter    `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+	unknownFields      protoimpl.UnknownFields
+	sizeCache          protoimpl.SizeCache
+}
+
+func (x *Parameters) Reset() {
+	*x = Parameters{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Parameters) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameters) ProtoMessage() {}
+
+func (x *Parameters) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Parameters) GetHeaders() []*HeaderParameter {
+	if x != nil {
+		if x.xxx_hidden_Headers != nil {
+			return *x.xxx_hidden_Headers
+		}
+	}
+	return nil
+}
+
+func (x *Parameters) SetHeaders(v []*HeaderParameter) {
+	x.xxx_hidden_Headers = &v
+}
+
+type Parameters_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Headers` is one or more HTTP header parameter.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+	Headers []*HeaderParameter
+}
+
+func (b0 Parameters_builder) Build() *Parameters {
+	m0 := &Parameters{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Headers = &b.Headers
+	return m0
+}
+
+// `HeaderParameter` represents an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+	state                  protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Name        string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_Description string                 `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Type        HeaderParameter_Type   `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+	xxx_hidden_Format      string                 `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+	xxx_hidden_Required    bool                   `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *HeaderParameter) Reset() {
+	*x = HeaderParameter{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *HeaderParameter) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+	if x != nil {
+		return x.xxx_hidden_Type
+	}
+	return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+	if x != nil {
+		return x.xxx_hidden_Format
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+	if x != nil {
+		return x.xxx_hidden_Required
+	}
+	return false
+}
+
+func (x *HeaderParameter) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *HeaderParameter) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *HeaderParameter) SetType(v HeaderParameter_Type) {
+	x.xxx_hidden_Type = v
+}
+
+func (x *HeaderParameter) SetFormat(v string) {
+	x.xxx_hidden_Format = v
+}
+
+func (x *HeaderParameter) SetRequired(v bool) {
+	x.xxx_hidden_Required = v
+}
+
+type HeaderParameter_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Name` is the header name.
+	Name string
+	// `Description` is a short description of the header.
+	Description string
+	// `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	// See: https://swagger.io/specification/v2/#parameterType.
+	Type HeaderParameter_Type
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// `Required` indicates whether the header parameter is required.
+	Required bool
+}
+
+func (b0 HeaderParameter_builder) Build() *HeaderParameter {
+	m0 := &HeaderParameter{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Type = b.Type
+	x.xxx_hidden_Format = b.Format
+	x.xxx_hidden_Required = b.Required
+	return m0
+}
+
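+// Usage sketch (editorial note, not part of the protoc-gen-go output): the opaque
+// builders above can be combined to construct a Parameters value; field values here
+// are illustrative only, e.g.
+//
+//	hdr := HeaderParameter_builder{
+//		Name:        "X-Request-Id",
+//		Description: "Correlates a request across services.",
+//		Required:    true,
+//	}.Build()
+//	params := Parameters_builder{Headers: []*HeaderParameter{hdr}}.Build()
+//	_ = params.GetHeaders()
+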
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+	state                  protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Description string                 `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Type        string                 `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+	xxx_hidden_Format      string                 `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+	xxx_hidden_Default     string                 `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"`
+	xxx_hidden_Pattern     string                 `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *Header) Reset() {
+	*x = Header{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Header) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Header) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Header) GetType() string {
+	if x != nil {
+		return x.xxx_hidden_Type
+	}
+	return ""
+}
+
+func (x *Header) GetFormat() string {
+	if x != nil {
+		return x.xxx_hidden_Format
+	}
+	return ""
+}
+
+func (x *Header) GetDefault() string {
+	if x != nil {
+		return x.xxx_hidden_Default
+	}
+	return ""
+}
+
+func (x *Header) GetPattern() string {
+	if x != nil {
+		return x.xxx_hidden_Pattern
+	}
+	return ""
+}
+
+func (x *Header) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Header) SetType(v string) {
+	x.xxx_hidden_Type = v
+}
+
+func (x *Header) SetFormat(v string) {
+	x.xxx_hidden_Format = v
+}
+
+func (x *Header) SetDefault(v string) {
+	x.xxx_hidden_Default = v
+}
+
+func (x *Header) SetPattern(v string) {
+	x.xxx_hidden_Pattern = v
+}
+
+type Header_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Description` is a short description of the header.
+	Description string
+	// The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	Type string
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// `Default` Declares the value of the header that the server will use if none is provided.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+	// Unlike JSON Schema this value MUST conform to the defined type for the header.
+	Default string
+	// `Pattern` See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+	Pattern string
+}
+
+func (b0 Header_builder) Build() *Header {
+	m0 := &Header{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Type = b.Type
+	x.xxx_hidden_Format = b.Format
+	x.xxx_hidden_Default = b.Default
+	x.xxx_hidden_Pattern = b.Pattern
+	return m0
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+type Response struct {
+	state                  protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Description string                     `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Schema      *Schema                    `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+	xxx_hidden_Headers     map[string]*Header         `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_Examples    map[string]string          `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_Extensions  map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *Response) Reset() {
+	*x = Response{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Response) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Response) GetSchema() *Schema {
+	if x != nil {
+		return x.xxx_hidden_Schema
+	}
+	return nil
+}
+
+func (x *Response) GetHeaders() map[string]*Header {
+	if x != nil {
+		return x.xxx_hidden_Headers
+	}
+	return nil
+}
+
+func (x *Response) GetExamples() map[string]string {
+	if x != nil {
+		return x.xxx_hidden_Examples
+	}
+	return nil
+}
+
+func (x *Response) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Response) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Response) SetSchema(v *Schema) {
+	x.xxx_hidden_Schema = v
+}
+
+func (x *Response) SetHeaders(v map[string]*Header) {
+	x.xxx_hidden_Headers = v
+}
+
+func (x *Response) SetExamples(v map[string]string) {
+	x.xxx_hidden_Examples = v
+}
+
+func (x *Response) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Response) HasSchema() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Schema != nil
+}
+
+func (x *Response) ClearSchema() {
+	x.xxx_hidden_Schema = nil
+}
+
+type Response_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Description` is a short description of the response.
+	// GFM syntax can be used for rich text representation.
+	Description string
+	// `Schema` optionally defines the structure of the response.
+	// If `Schema` is not provided, it means there is no content to the response.
+	Schema *Schema
+	// `Headers` is a list of headers that are sent with the response.
+	// The header name is expected to be a string in the canonical format of the MIME header key.
+	// See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+	Headers map[string]*Header
+	// `Examples` gives per-mimetype response examples.
+	// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+	Examples map[string]string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Response_builder) Build() *Response {
+	m0 := &Response{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Schema = b.Schema
+	x.xxx_hidden_Headers = b.Headers
+	x.xxx_hidden_Examples = b.Examples
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
+
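+// Usage sketch (editorial note, not generated output): a Response can be built from
+// the builder above, and the presence of its optional Schema checked with the
+// Has/Set accessors; values are illustrative only, e.g.
+//
+//	resp := Response_builder{
+//		Description: "A successful response.",
+//		Examples:    map[string]string{"application/json": "{}"},
+//	}.Build()
+//	if !resp.HasSchema() {
+//		resp.SetSchema(Schema_builder{Discriminator: "kind"}.Build())
+//	}
+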
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    title: "Echo API";
+//	    version: "1.0";
+//	    description: "";
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	  };
+//	  ...
+//	};
+type Info struct {
+	state                     protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Title          string                     `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+	xxx_hidden_Description    string                     `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_TermsOfService string                     `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+	xxx_hidden_Contact        *Contact                   `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+	xxx_hidden_License        *License                   `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+	xxx_hidden_Version        string                     `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+	xxx_hidden_Extensions     map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields             protoimpl.UnknownFields
+	sizeCache                 protoimpl.SizeCache
+}
+
+func (x *Info) Reset() {
+	*x = Info{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Info) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Info) GetTitle() string {
+	if x != nil {
+		return x.xxx_hidden_Title
+	}
+	return ""
+}
+
+func (x *Info) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+	if x != nil {
+		return x.xxx_hidden_TermsOfService
+	}
+	return ""
+}
+
+func (x *Info) GetContact() *Contact {
+	if x != nil {
+		return x.xxx_hidden_Contact
+	}
+	return nil
+}
+
+func (x *Info) GetLicense() *License {
+	if x != nil {
+		return x.xxx_hidden_License
+	}
+	return nil
+}
+
+func (x *Info) GetVersion() string {
+	if x != nil {
+		return x.xxx_hidden_Version
+	}
+	return ""
+}
+
+func (x *Info) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Info) SetTitle(v string) {
+	x.xxx_hidden_Title = v
+}
+
+func (x *Info) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Info) SetTermsOfService(v string) {
+	x.xxx_hidden_TermsOfService = v
+}
+
+func (x *Info) SetContact(v *Contact) {
+	x.xxx_hidden_Contact = v
+}
+
+func (x *Info) SetLicense(v *License) {
+	x.xxx_hidden_License = v
+}
+
+func (x *Info) SetVersion(v string) {
+	x.xxx_hidden_Version = v
+}
+
+func (x *Info) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Info) HasContact() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Contact != nil
+}
+
+func (x *Info) HasLicense() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_License != nil
+}
+
+func (x *Info) ClearContact() {
+	x.xxx_hidden_Contact = nil
+}
+
+func (x *Info) ClearLicense() {
+	x.xxx_hidden_License = nil
+}
+
+type Info_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The title of the application.
+	Title string
+	// A short description of the application. GFM syntax can be used for rich
+	// text representation.
+	Description string
+	// The Terms of Service for the API.
+	TermsOfService string
+	// The contact information for the exposed API.
+	Contact *Contact
+	// The license information for the exposed API.
+	License *License
+	// Provides the version of the application API (not to be confused
+	// with the specification version).
+	Version string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Info_builder) Build() *Info {
+	m0 := &Info{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Title = b.Title
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_TermsOfService = b.TermsOfService
+	x.xxx_hidden_Contact = b.Contact
+	x.xxx_hidden_License = b.License
+	x.xxx_hidden_Version = b.Version
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
+
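+// Usage sketch (editorial note, not generated output): the proto option example in
+// the Info comment above corresponds to building the same document in Go, e.g.
+//
+//	info := Info_builder{
+//		Title:   "Echo API",
+//		Version: "1.0",
+//		Contact: Contact_builder{
+//			Name: "gRPC-Gateway project",
+//			Url:  "https://github.com/grpc-ecosystem/grpc-gateway",
+//		}.Build(),
+//	}.Build()
+//	_ = info.GetContact().GetUrl()
+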
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    ...
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    ...
+//	  };
+//	  ...
+//	};
+type Contact struct {
+	state            protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Name  string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_Url   string                 `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	xxx_hidden_Email string                 `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+	unknownFields    protoimpl.UnknownFields
+	sizeCache        protoimpl.SizeCache
+}
+
+func (x *Contact) Reset() {
+	*x = Contact{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Contact) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Contact) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *Contact) GetUrl() string {
+	if x != nil {
+		return x.xxx_hidden_Url
+	}
+	return ""
+}
+
+func (x *Contact) GetEmail() string {
+	if x != nil {
+		return x.xxx_hidden_Email
+	}
+	return ""
+}
+
+func (x *Contact) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *Contact) SetUrl(v string) {
+	x.xxx_hidden_Url = v
+}
+
+func (x *Contact) SetEmail(v string) {
+	x.xxx_hidden_Email = v
+}
+
+type Contact_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The identifying name of the contact person/organization.
+	Name string
+	// The URL pointing to the contact information. MUST be in the format of a
+	// URL.
+	Url string
+	// The email address of the contact person/organization. MUST be in the format
+	// of an email address.
+	Email string
+}
+
+func (b0 Contact_builder) Build() *Contact {
+	m0 := &Contact{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_Url = b.Url
+	x.xxx_hidden_Email = b.Email
+	return m0
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    ...
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	    ...
+//	  };
+//	  ...
+//	};
+type License struct {
+	state           protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Name string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_Url  string                 `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
+}
+
+func (x *License) Reset() {
+	*x = License{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *License) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *License) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *License) GetUrl() string {
+	if x != nil {
+		return x.xxx_hidden_Url
+	}
+	return ""
+}
+
+func (x *License) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *License) SetUrl(v string) {
+	x.xxx_hidden_Url = v
+}
+
+type License_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The license name used for the API.
+	Name string
+	// A URL to the license used for the API. MUST be in the format of a URL.
+	Url string
+}
+
+func (b0 License_builder) Build() *License {
+	m0 := &License{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_Url = b.Url
+	return m0
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  ...
+//	  external_docs: {
+//	    description: "More about gRPC-Gateway";
+//	    url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	  }
+//	  ...
+//	};
+type ExternalDocumentation struct {
+	state                  protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Description string                 `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Url         string                 `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *ExternalDocumentation) Reset() {
+	*x = ExternalDocumentation{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExternalDocumentation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocumentation) ProtoMessage() {}
+
+func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *ExternalDocumentation) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *ExternalDocumentation) GetUrl() string {
+	if x != nil {
+		return x.xxx_hidden_Url
+	}
+	return ""
+}
+
+func (x *ExternalDocumentation) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *ExternalDocumentation) SetUrl(v string) {
+	x.xxx_hidden_Url = v
+}
+
+type ExternalDocumentation_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A short description of the target documentation. GFM syntax can be used for
+	// rich text representation.
+	Description string
+	// The URL for the target documentation. Value MUST be in the format
+	// of a URL.
+	Url string
+}
+
+func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation {
+	m0 := &ExternalDocumentation{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Url = b.Url
+	return m0
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+type Schema struct {
+	state                    protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_JsonSchema    *JSONSchema            `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+	xxx_hidden_Discriminator string                 `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+	xxx_hidden_ReadOnly      bool                   `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	xxx_hidden_ExternalDocs  *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_Example       string                 `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+	unknownFields            protoimpl.UnknownFields
+	sizeCache                protoimpl.SizeCache
+}
+
+func (x *Schema) Reset() {
+	*x = Schema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Schema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Schema) GetJsonSchema() *JSONSchema {
+	if x != nil {
+		return x.xxx_hidden_JsonSchema
+	}
+	return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+	if x != nil {
+		return x.xxx_hidden_Discriminator
+	}
+	return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+	if x != nil {
+		return x.xxx_hidden_ReadOnly
+	}
+	return false
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *Schema) GetExample() string {
+	if x != nil {
+		return x.xxx_hidden_Example
+	}
+	return ""
+}
+
+func (x *Schema) SetJsonSchema(v *JSONSchema) {
+	x.xxx_hidden_JsonSchema = v
+}
+
+func (x *Schema) SetDiscriminator(v string) {
+	x.xxx_hidden_Discriminator = v
+}
+
+func (x *Schema) SetReadOnly(v bool) {
+	x.xxx_hidden_ReadOnly = v
+}
+
+func (x *Schema) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Schema) SetExample(v string) {
+	x.xxx_hidden_Example = v
+}
+
+func (x *Schema) HasJsonSchema() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_JsonSchema != nil
+}
+
+func (x *Schema) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Schema) ClearJsonSchema() {
+	x.xxx_hidden_JsonSchema = nil
+}
+
+func (x *Schema) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+type Schema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	JsonSchema *JSONSchema
+	// Adds support for polymorphism. The discriminator is the schema property
+	// name that is used to differentiate between other schema that inherit this
+	// schema. The property name used MUST be defined at this schema and it MUST
+	// be in the required property list. When used, the value MUST be the name of
+	// this schema or any schema that inherits it.
+	Discriminator string
+	// Relevant only for Schema "properties" definitions. Declares the property as
+	// "read only". This means that it MAY be sent as part of a response but MUST
+	// NOT be sent as part of the request. Properties marked as readOnly being
+	// true SHOULD NOT be in the required list of the defined schema. Default
+	// value is false.
+	ReadOnly bool
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation
+	// A free-form property to include an example of an instance for this schema in JSON.
+	// This is copied verbatim to the output.
+	Example string
+}
+
+func (b0 Schema_builder) Build() *Schema {
+	m0 := &Schema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_JsonSchema = b.JsonSchema
+	x.xxx_hidden_Discriminator = b.Discriminator
+	x.xxx_hidden_ReadOnly = b.ReadOnly
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_Example = b.Example
+	return m0
+}
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+//	  ...
+//	  title: "MyEnum";
+//	  description:"This is my nice enum";
+//	  example: "ZERO";
+//	  required: true;
+//	  ...
+//	};
+type EnumSchema struct {
+	state                   protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Description  string                     `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Default      string                     `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+	xxx_hidden_Title        string                     `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+	xxx_hidden_Required     bool                       `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+	xxx_hidden_ReadOnly     bool                       `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	xxx_hidden_ExternalDocs *ExternalDocumentation     `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_Example      string                     `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+	xxx_hidden_Ref          string                     `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+	xxx_hidden_Extensions   map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+	*x = EnumSchema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+	if x != nil {
+		return x.xxx_hidden_Default
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+	if x != nil {
+		return x.xxx_hidden_Title
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+	if x != nil {
+		return x.xxx_hidden_Required
+	}
+	return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.xxx_hidden_ReadOnly
+	}
+	return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+	if x != nil {
+		return x.xxx_hidden_Example
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+	if x != nil {
+		return x.xxx_hidden_Ref
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+	x.xxx_hidden_Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+	x.xxx_hidden_Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+	x.xxx_hidden_Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+	x.xxx_hidden_ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+	x.xxx_hidden_Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+	x.xxx_hidden_Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A short description of the schema.
+	Description string
+	Default     string
+	// The title of the schema.
+	Title    string
+	Required bool
+	ReadOnly bool
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation
+	Example      string
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+	m0 := &EnumSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Default = b.Default
+	x.xxx_hidden_Title = b.Title
+	x.xxx_hidden_Required = b.Required
+	x.xxx_hidden_ReadOnly = b.ReadOnly
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_Example = b.Example
+	x.xxx_hidden_Ref = b.Ref
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
+
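+// Usage sketch (editorial note, not generated output): the openapiv2_enum option
+// shown in the EnumSchema comment above corresponds to the following builder call,
+// with illustrative values, e.g.
+//
+//	es := EnumSchema_builder{
+//		Title:       "MyEnum",
+//		Description: "This is my nice enum",
+//		Example:     "ZERO",
+//		Required:    true,
+//	}.Build()
+//	_ = es.GetTitle()
+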
+// `JSONSchema` represents properties from JSON Schema taken, and as used, in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//	message SimpleMessage {
+//	  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//	    json_schema: {
+//	      title: "SimpleMessage"
+//	      description: "A simple message."
+//	      required: ["id"]
+//	    }
+//	  };
+//
+//	  // Id represents the message identifier.
+//	  string id = 1; [
+//	      (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//	        description: "The unique identifier of the simple message."
+//	      }];
+//	}
+type JSONSchema struct {
+	state                         protoimpl.MessageState             `protogen:"opaque.v1"`
+	xxx_hidden_Ref                string                             `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	xxx_hidden_Title              string                             `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+	xxx_hidden_Description        string                             `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Default            string                             `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+	xxx_hidden_ReadOnly           bool                               `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	xxx_hidden_Example            string                             `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+	xxx_hidden_MultipleOf         float64                            `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	xxx_hidden_Maximum            float64                            `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	xxx_hidden_ExclusiveMaximum   bool                               `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	xxx_hidden_Minimum            float64                            `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	xxx_hidden_ExclusiveMinimum   bool                               `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	xxx_hidden_MaxLength          uint64                             `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	xxx_hidden_MinLength          uint64                             `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	xxx_hidden_Pattern            string                             `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	xxx_hidden_MaxItems           uint64                             `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	xxx_hidden_MinItems           uint64                             `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	xxx_hidden_UniqueItems        bool                               `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	xxx_hidden_MaxProperties      uint64                             `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+	xxx_hidden_MinProperties      uint64                             `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+	xxx_hidden_Required           []string                           `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+	xxx_hidden_Array              []string                           `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+	xxx_hidden_Type               []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+	xxx_hidden_Format             string                             `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+	xxx_hidden_Enum               []string                           `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+	xxx_hidden_FieldConfiguration *JSONSchema_FieldConfiguration     `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+	xxx_hidden_Extensions         map[string]*structpb.Value         `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields                 protoimpl.UnknownFields
+	sizeCache                     protoimpl.SizeCache
+}
+
+func (x *JSONSchema) Reset() {
+	*x = JSONSchema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema) ProtoMessage() {}
+
+func (x *JSONSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema) GetRef() string {
+	if x != nil {
+		return x.xxx_hidden_Ref
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetTitle() string {
+	if x != nil {
+		return x.xxx_hidden_Title
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetDefault() string {
+	if x != nil {
+		return x.xxx_hidden_Default
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.xxx_hidden_ReadOnly
+	}
+	return false
+}
+
+func (x *JSONSchema) GetExample() string {
+	if x != nil {
+		return x.xxx_hidden_Example
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.xxx_hidden_MultipleOf
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMaximum() float64 {
+	if x != nil {
+		return x.xxx_hidden_Maximum
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.xxx_hidden_ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMinimum() float64 {
+	if x != nil {
+		return x.xxx_hidden_Minimum
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.xxx_hidden_ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMaxLength() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MaxLength
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinLength() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MinLength
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetPattern() string {
+	if x != nil {
+		return x.xxx_hidden_Pattern
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetMaxItems() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MaxItems
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinItems() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MinItems
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetUniqueItems() bool {
+	if x != nil {
+		return x.xxx_hidden_UniqueItems
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMaxProperties() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MaxProperties
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinProperties() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MinProperties
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetRequired() []string {
+	if x != nil {
+		return x.xxx_hidden_Required
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetArray() []string {
+	if x != nil {
+		return x.xxx_hidden_Array
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+	if x != nil {
+		return x.xxx_hidden_Type
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetFormat() string {
+	if x != nil {
+		return x.xxx_hidden_Format
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetEnum() []string {
+	if x != nil {
+		return x.xxx_hidden_Enum
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration {
+	if x != nil {
+		return x.xxx_hidden_FieldConfiguration
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *JSONSchema) SetRef(v string) {
+	x.xxx_hidden_Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+	x.xxx_hidden_Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+	x.xxx_hidden_Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+	x.xxx_hidden_ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+	x.xxx_hidden_Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+	x.xxx_hidden_MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+	x.xxx_hidden_Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+	x.xxx_hidden_ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+	x.xxx_hidden_Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+	x.xxx_hidden_ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+	x.xxx_hidden_MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+	x.xxx_hidden_MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+	x.xxx_hidden_Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+	x.xxx_hidden_MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+	x.xxx_hidden_MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+	x.xxx_hidden_UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+	x.xxx_hidden_MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+	x.xxx_hidden_MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+	x.xxx_hidden_Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+	x.xxx_hidden_Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+	x.xxx_hidden_Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+	x.xxx_hidden_Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+	x.xxx_hidden_Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+	x.xxx_hidden_FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+	x.xxx_hidden_FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string
+	// The title of the schema.
+	Title string
+	// A short description of the schema.
+	Description string
+	Default     string
+	ReadOnly    bool
+	// A free-form property to include a JSON example of this field. This is copied
+	// verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0. See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject and https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject.
+	Example    string
+	MultipleOf float64
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value MUST be a number.
+	Maximum          float64
+	ExclusiveMaximum bool
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value MUST be a number.
+	Minimum          float64
+	ExclusiveMinimum bool
+	MaxLength        uint64
+	MinLength        uint64
+	Pattern          string
+	MaxItems         uint64
+	MinItems         uint64
+	UniqueItems      bool
+	MaxProperties    uint64
+	MinProperties    uint64
+	Required         []string
+	// Items in 'array' must be unique.
+	Array []string
+	Type  []JSONSchema_JSONSchemaSimpleTypes
+	// `Format`
+	Format string
+	// Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+	Enum []string
+	// Additional field level properties used when generating the OpenAPI v2 file.
+	FieldConfiguration *JSONSchema_FieldConfiguration
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+	m0 := &JSONSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Ref = b.Ref
+	x.xxx_hidden_Title = b.Title
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Default = b.Default
+	x.xxx_hidden_ReadOnly = b.ReadOnly
+	x.xxx_hidden_Example = b.Example
+	x.xxx_hidden_MultipleOf = b.MultipleOf
+	x.xxx_hidden_Maximum = b.Maximum
+	x.xxx_hidden_ExclusiveMaximum = b.ExclusiveMaximum
+	x.xxx_hidden_Minimum = b.Minimum
+	x.xxx_hidden_ExclusiveMinimum = b.ExclusiveMinimum
+	x.xxx_hidden_MaxLength = b.MaxLength
+	x.xxx_hidden_MinLength = b.MinLength
+	x.xxx_hidden_Pattern = b.Pattern
+	x.xxx_hidden_MaxItems = b.MaxItems
+	x.xxx_hidden_MinItems = b.MinItems
+	x.xxx_hidden_UniqueItems = b.UniqueItems
+	x.xxx_hidden_MaxProperties = b.MaxProperties
+	x.xxx_hidden_MinProperties = b.MinProperties
+	x.xxx_hidden_Required = b.Required
+	x.xxx_hidden_Array = b.Array
+	x.xxx_hidden_Type = b.Type
+	x.xxx_hidden_Format = b.Format
+	x.xxx_hidden_Enum = b.Enum
+	x.xxx_hidden_FieldConfiguration = b.FieldConfiguration
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
+
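+// Usage sketch (editorial note, not generated output): a field-level schema such as
+// the SimpleMessage example in the JSONSchema comment above could be expressed
+// directly via the builders, e.g.
+//
+//	js := JSONSchema_builder{
+//		Title:       "SimpleMessage",
+//		Description: "A simple message.",
+//		Required:    []string{"id"},
+//	}.Build()
+//	schema := Schema_builder{JsonSchema: js}.Build()
+//	_ = schema.GetJsonSchema().GetRequired()
+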
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+	state                   protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Name         string                     `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_Description  string                     `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_ExternalDocs *ExternalDocumentation     `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_Extensions   map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
+}
+
+func (x *Tag) Reset() {
+	*x = Tag{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Tag) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Tag) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *Tag) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *Tag) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Tag) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *Tag) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Tag) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Tag) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Tag) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Tag) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+type Tag_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The name of the tag. Use it to allow override of the name of a
+	// global Tag object, then use that name to reference the tag throughout the
+	// OpenAPI file.
+	Name string
+	// A short description for the tag. GFM syntax can be used for rich text
+	// representation.
+	Description string
+	// Additional external documentation for this tag.
+	ExternalDocs *ExternalDocumentation
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Tag_builder) Build() *Tag {
+	m0 := &Tag{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
+
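+// Usage sketch (editorial note, not generated output): a Tag with external
+// documentation, using illustrative values, e.g.
+//
+//	tag := Tag_builder{
+//		Name:        "echo",
+//		Description: "Echo service operations.",
+//		ExternalDocs: ExternalDocumentation_builder{
+//			Description: "More about gRPC-Gateway",
+//			Url:         "https://github.com/grpc-ecosystem/grpc-gateway",
+//		}.Build(),
+//	}.Build()
+//	_ = tag.HasExternalDocs()
+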
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+	state               protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
+}
+
+func (x *SecurityDefinitions) Reset() {
+	*x = SecurityDefinitions{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityDefinitions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+	if x != nil {
+		return x.xxx_hidden_Security
+	}
+	return nil
+}
+
+func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) {
+	x.xxx_hidden_Security = v
+}
+
+type SecurityDefinitions_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A single security scheme definition, mapping a "name" to the scheme it
+	// defines.
+	Security map[string]*SecurityScheme
+}
+
+func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions {
+	m0 := &SecurityDefinitions{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Security = b.Security
+	return m0
+}
+
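+// Usage sketch (editorial note, not generated output): security schemes are keyed by
+// a scheme name; the scheme itself can be populated via the setters defined below,
+// with illustrative values, e.g.
+//
+//	scheme := &SecurityScheme{}
+//	scheme.SetName("Authorization")
+//	scheme.SetDescription("API key passed in the Authorization header.")
+//	defs := SecurityDefinitions_builder{
+//		Security: map[string]*SecurityScheme{"ApiKeyAuth": scheme},
+//	}.Build()
+//	_ = defs.GetSecurity()
+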
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+	state                       protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Type             SecurityScheme_Type        `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"`
+	xxx_hidden_Description      string                     `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Name             string                     `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_In               SecurityScheme_In          `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"`
+	xxx_hidden_Flow             SecurityScheme_Flow        `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"`
+	xxx_hidden_AuthorizationUrl string                     `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	xxx_hidden_TokenUrl         string                     `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	xxx_hidden_Scopes           *Scopes                    `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	xxx_hidden_Extensions       map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields               protoimpl.UnknownFields
+	sizeCache                   protoimpl.SizeCache
+}
+
+func (x *SecurityScheme) Reset() {
+	*x = SecurityScheme{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityScheme) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityScheme) ProtoMessage() {}
+
+func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityScheme) GetType() SecurityScheme_Type {
+	if x != nil {
+		return x.xxx_hidden_Type
+	}
+	return SecurityScheme_TYPE_INVALID
+}
+
+func (x *SecurityScheme) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetIn() SecurityScheme_In {
+	if x != nil {
+		return x.xxx_hidden_In
+	}
+	return SecurityScheme_IN_INVALID
+}
+
+func (x *SecurityScheme) GetFlow() SecurityScheme_Flow {
+	if x != nil {
+		return x.xxx_hidden_Flow
+	}
+	return SecurityScheme_FLOW_INVALID
+}
+
+func (x *SecurityScheme) GetAuthorizationUrl() string {
+	if x != nil {
+		return x.xxx_hidden_AuthorizationUrl
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetTokenUrl() string {
+	if x != nil {
+		return x.xxx_hidden_TokenUrl
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetScopes() *Scopes {
+	if x != nil {
+		return x.xxx_hidden_Scopes
+	}
+	return nil
+}
+
+func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *SecurityScheme) SetType(v SecurityScheme_Type) {
+	x.xxx_hidden_Type = v
+}
+
+func (x *SecurityScheme) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *SecurityScheme) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *SecurityScheme) SetIn(v SecurityScheme_In) {
+	x.xxx_hidden_In = v
+}
+
+func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) {
+	x.xxx_hidden_Flow = v
+}
+
+func (x *SecurityScheme) SetAuthorizationUrl(v string) {
+	x.xxx_hidden_AuthorizationUrl = v
+}
+
+func (x *SecurityScheme) SetTokenUrl(v string) {
+	x.xxx_hidden_TokenUrl = v
+}
+
+func (x *SecurityScheme) SetScopes(v *Scopes) {
+	x.xxx_hidden_Scopes = v
+}
+
+func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *SecurityScheme) HasScopes() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Scopes != nil
+}
+
+func (x *SecurityScheme) ClearScopes() {
+	x.xxx_hidden_Scopes = nil
+}
+
+type SecurityScheme_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The type of the security scheme. Valid values are "basic",
+	// "apiKey" or "oauth2".
+	Type SecurityScheme_Type
+	// A short description of the security scheme.
+	Description string
+	// The name of the header or query parameter to be used.
+	// Valid for apiKey.
+	Name string
+	// The location of the API key. Valid values are "query" or
+	// "header".
+	// Valid for apiKey.
+	In SecurityScheme_In
+	// The flow used by the OAuth2 security scheme. Valid values are
+	// "implicit", "password", "application" or "accessCode".
+	// Valid for oauth2.
+	Flow SecurityScheme_Flow
+	// The authorization URL to be used for this flow. This SHOULD be in
+	// the form of a URL.
+	// Valid for oauth2/implicit and oauth2/accessCode.
+	AuthorizationUrl string
+	// The token URL to be used for this flow. This SHOULD be in the
+	// form of a URL.
+	// Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+	TokenUrl string
+	// The available scopes for the OAuth2 security scheme.
+	// Valid for oauth2.
+	Scopes *Scopes
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 SecurityScheme_builder) Build() *SecurityScheme {
+	m0 := &SecurityScheme{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Type = b.Type
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_In = b.In
+	x.xxx_hidden_Flow = b.Flow
+	x.xxx_hidden_AuthorizationUrl = b.AuthorizationUrl
+	x.xxx_hidden_TokenUrl = b.TokenUrl
+	x.xxx_hidden_Scopes = b.Scopes
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
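
The builders above follow the opaque API's value-builder pattern: populate the exported fields and call Build() to obtain the message. As a minimal, hypothetical usage sketch (not part of the generated file, and assuming it runs in a package that imports these types, using the SecurityScheme_TYPE_OAUTH2 and SecurityScheme_FLOW_ACCESS_CODE enum values declared earlier in this file; the scheme name and URLs are placeholders), an OAuth2 scheme and its definitions map could be assembled like this:

	// Scopes available to the scheme.
	scopes := Scopes_builder{
		Scope: map[string]string{"read": "Grants read access"},
	}.Build()

	// OAuth2 access-code flow scheme.
	scheme := SecurityScheme_builder{
		Type:             SecurityScheme_TYPE_OAUTH2,
		Description:      "OAuth2 access-code flow",
		Flow:             SecurityScheme_FLOW_ACCESS_CODE,
		AuthorizationUrl: "https://example.com/oauth/authorize",
		TokenUrl:         "https://example.com/oauth/token",
		Scopes:           scopes,
	}.Build()

	// Register the scheme under a name in the security definitions.
	defs := SecurityDefinitions_builder{
		Security: map[string]*SecurityScheme{"OAuth2": scheme},
	}.Build()
	_ = defs.GetSecurity()
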
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+	state                          protoimpl.MessageState                                   `protogen:"opaque.v1"`
+	xxx_hidden_SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields                  protoimpl.UnknownFields
+	sizeCache                      protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+	*x = SecurityRequirement{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+	if x != nil {
+		return x.xxx_hidden_SecurityRequirement
+	}
+	return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+	x.xxx_hidden_SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Each name must correspond to a security scheme which is declared in
+	// the Security Definitions. If the security scheme is of type "oauth2",
+	// then the value is a list of scope names required for the execution.
+	// For other security scheme types, the array MUST be empty.
+	SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+	m0 := &SecurityRequirement{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_SecurityRequirement = b.SecurityRequirement
+	return m0
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+	state            protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Scope map[string]string      `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields    protoimpl.UnknownFields
+	sizeCache        protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+	*x = Scopes{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+	if x != nil {
+		return x.xxx_hidden_Scope
+	}
+	return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+	x.xxx_hidden_Scope = v
+}
+
+type Scopes_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Maps the name of a scope to a short description of it (as the value
+	// of the property).
+	Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+	m0 := &Scopes{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Scope = b.Scope
+	return m0
+}
+
+// `FieldConfiguration` provides additional field-level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPI v2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+	state                    protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_PathParamName string                 `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+	unknownFields            protoimpl.UnknownFields
+	sizeCache                protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+	*x = JSONSchema_FieldConfiguration{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+	if x != nil {
+		return x.xxx_hidden_PathParamName
+	}
+	return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+	x.xxx_hidden_PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Alternative parameter name when used as a path parameter. If set, this
+	// will be used as the complete parameter name when this field is used as a
+	// path parameter. Use this to avoid having auto-generated path parameter
+	// names for overlapping paths.
+	PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+	m0 := &JSONSchema_FieldConfiguration{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_PathParamName = b.PathParamName
+	return m0
+}
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+	state            protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Scope []string               `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+	unknownFields    protoimpl.UnknownFields
+	sizeCache        protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+	*x = SecurityRequirement_SecurityRequirementValue{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+	if x != nil {
+		return x.xxx_hidden_Scope
+	}
+	return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+	x.xxx_hidden_Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	Scope []string
+}
+
+func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue {
+	m0 := &SecurityRequirement_SecurityRequirementValue{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Scope = b.Scope
+	return m0
+}
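
A corresponding sketch (again illustrative, not generated code, under the same assumptions as the earlier example) for requiring that named scheme on an operation, with the oauth2 value carrying the required scope names:

	// Require the "OAuth2" definition with the "read" scope.
	req := SecurityRequirement_builder{
		SecurityRequirement: map[string]*SecurityRequirement_SecurityRequirementValue{
			"OAuth2": SecurityRequirement_SecurityRequirementValue_builder{
				Scope: []string{"read"},
			}.Build(),
		},
	}.Build()
	_ = req.GetSecurityRequirement()
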
+
+var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{
+	0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+	0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67,
+	0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a,
+	0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e,
+	0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70,
+	0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50,
+	0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73,
+	0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+	0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+	0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+	0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70,
+	0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+	0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63,
+	0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69,
+	0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+	0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+	0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+	0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d,
+	0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+	0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+	0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07,
+	0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+	0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+	0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65,
+	0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+	0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f,
+	0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+	0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+	0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+	0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20,
+	0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a,
+	0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+	0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
+	0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63,
+	0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+	0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a,
+	0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52,
+	0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+	0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+	0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
+	0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+	0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+	0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+	0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+	0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48,
+	0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12,
+	0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48,
+	0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54,
+	0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+	0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+	0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a,
+	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+	0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a,
+	0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+	0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45,
+	0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+	0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+	0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66,
+	0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08,
+	0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a,
+	0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10,
+	0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08,
+	0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a,
+	0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73,
+	0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+	0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+	0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+	0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+	0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+	0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72,
+	0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76,
+	0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63,
+	0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c,
+	0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12,
+	0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45,
+	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
+	0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+	0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65,
+	0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74,
+	0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a,
+	0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+	0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+	0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d,
+	0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+	0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08,
+	0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14,
+	0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+	0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+	0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+	0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a,
+	0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+	0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+	0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18,
+	0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10,
+	0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66,
+	0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7,
+	0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a,
+	0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+	0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+	0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
+	0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+	0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+	0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18,
+	0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74,
+	0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d,
+	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78,
+	0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69,
+	0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+	0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+	0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+	0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28,
+	0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78,
+	0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+	0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+	0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c,
+	0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78,
+	0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65,
+	0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c,
+	0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+	0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12,
+	0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01,
+	0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+	0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+	0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69,
+	0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+	0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+	0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18,
+	0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+	0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+	0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
+	0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18,
+	0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70,
+	0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a,
+	0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20,
+	0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+	0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+	0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74,
+	0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
+	0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
+	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59,
+	0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12,
+	0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+	0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+	0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a,
+	0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+	0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+	0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04,
+	0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e,
+	0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a,
+	0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67,
+	0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+	0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61,
+	0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+	0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+	0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+	0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+	0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
+	0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+	0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+	0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67,
+	0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff,
+	0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+	0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69,
+	0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f,
+	0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a,
+	0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+	0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+	0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+	0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+	0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65,
+	0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+	0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+	0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+	0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c,
+	0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e,
+	0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10,
+	0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02,
+	0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10,
+	0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e,
+	0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55,
+	0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44,
+	0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c,
+	0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11,
+	0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+	0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
+	0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50,
+	0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c,
+	0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04,
+	0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71,
+	0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63,
+	0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e,
+	0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75,
+	0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+	0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+	0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+	0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+	0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63,
+	0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70,
+	0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+	0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07,
+	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54,
+	0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06,
+	0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42,
+	0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72,
+	0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70,
+	0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33,
+}
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{
+	(Scheme)(0),                           // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	(HeaderParameter_Type)(0),             // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+	(JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+	(SecurityScheme_Type)(0),              // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+	(SecurityScheme_In)(0),                // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+	(SecurityScheme_Flow)(0),              // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+	(*Swagger)(nil),                       // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	(*Operation)(nil),                     // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+	(*Parameters)(nil),                    // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters
+	(*HeaderParameter)(nil),               // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+	(*Header)(nil),                        // 10: grpc.gateway.protoc_gen_openapiv2.options.Header
+	(*Response)(nil),                      // 11: grpc.gateway.protoc_gen_openapiv2.options.Response
+	(*Info)(nil),                          // 12: grpc.gateway.protoc_gen_openapiv2.options.Info
+	(*Contact)(nil),                       // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact
+	(*License)(nil),                       // 14: grpc.gateway.protoc_gen_openapiv2.options.License
+	(*ExternalDocumentation)(nil),         // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	(*Schema)(nil),                        // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema
+	(*EnumSchema)(nil),                    // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	(*JSONSchema)(nil),                    // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	(*Tag)(nil),                           // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag
+	(*SecurityDefinitions)(nil),           // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+	(*SecurityScheme)(nil),                // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+	(*SecurityRequirement)(nil),           // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	(*Scopes)(nil),                        // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes
+	nil,                                   // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+	nil,                                   // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+	nil,                                   // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+	nil,                                   // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+	nil,                                   // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+	nil,                                   // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+	nil,                                   // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+	nil,                                   // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+	nil,                                   // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+	(*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+	nil,                                   // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+	nil,                                   // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+	nil,                                   // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+	nil,                                   // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+	(*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+	nil,                    // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+	nil,                    // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+	(*structpb.Value)(nil), // 41: google.protobuf.Value
+}
+var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{
+	12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info
+	0,  // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+	20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+	22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+	15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+	15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+	0,  // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+	8,  // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters
+	9,  // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+	1,  // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+	16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+	28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+	29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+	30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+	13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact
+	14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License
+	31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+	18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+	2,  // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+	33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+	34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+	15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+	36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+	3,  // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+	4,  // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+	5,  // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+	23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes
+	37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+	39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+	40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+	11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+	41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+	41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header
+	41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+	41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+	53, // [53:53] is the sub-list for method output_type
+	53, // [53:53] is the sub-list for method input_type
+	53, // [53:53] is the sub-list for extension type_name
+	53, // [53:53] is the sub-list for extension extendee
+	0,  // [0:53] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() }
+func file_protoc_gen_openapiv2_options_openapiv2_proto_init() {
+	if File_protoc_gen_openapiv2_options_openapiv2_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc,
+			NumEnums:      6,
+			NumMessages:   35,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes,
+		DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs,
+		EnumInfos:         file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes,
+		MessageInfos:      file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes,
+	}.Build()
+	File_protoc_gen_openapiv2_options_openapiv2_proto = out.File
+	file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil
+	file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil
+	file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
index e87a115..a320b30 100644
--- a/vendor/github.com/hashicorp/go-uuid/LICENSE
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -1,3 +1,5 @@
+Copyright © 2015-2022 HashiCorp, Inc.
+
 Mozilla Public License, version 2.0
 
 1. Definitions
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
index e4571ce..fd01342 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
@@ -89,7 +89,12 @@
 		return tkt, skey, nil
 	}
 	princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn)
-	realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1])
+	realm := cl.spnRealm(princ)
+
+	// if we don't know the SPN's realm, ask the client realm's KDC
+	if realm == "" {
+		realm = cl.Credentials.Realm()
+	}
 
 	tgt, skey, err := cl.sessionTGT(realm)
 	if err != nil {
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
index 634f015..688ad3a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
@@ -2,7 +2,6 @@
 
 import (
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -173,7 +172,7 @@
 		}
 		return rb, nil
 	}
-	return nil, errors.New("error in getting a TCP connection to any of the KDCs")
+	return nil, fmt.Errorf("error sending to a KDC: %s", strings.Join(errs, "; "))
 }
 
 // sendTCP sends bytes to connection over TCP.
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
index f7654d0..7e1e65c 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
@@ -41,7 +41,9 @@
 			// Cancel the one in the cache and add this one.
 			i.mux.Lock()
 			defer i.mux.Unlock()
-			i.cancel <- true
+			if i.cancel != nil {
+				i.cancel <- true
+			}
 			s.Entries[sess.realm] = sess
 			return
 		}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
index a763843..f448c8a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
@@ -95,7 +95,7 @@
 	opts := asn1.BitString{}
 	opts.Bytes, _ = hex.DecodeString("00000010")
 	opts.BitLength = len(opts.Bytes) * 8
-	return LibDefaults{
+	l := LibDefaults{
 		CCacheType:              4,
 		Clockskew:               time.Duration(300) * time.Second,
 		DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid),
@@ -115,6 +115,10 @@
 		UDPPreferenceLimit:      1465,
 		PreferredPreauthTypes:   []int{17, 16, 15, 14},
 	}
+	l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto)
+	l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto)
+	l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto)
+	return l
 }
 
 // Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct.
@@ -507,7 +511,7 @@
 			return r
 		}
 	}
-	return c.LibDefaults.DefaultRealm
+	return ""
 }
 
 // Load the KRB5 configuration from the specified file path.
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
index c3b35c7..a021e41 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
@@ -4,7 +4,7 @@
 	"bytes"
 	"encoding/binary"
 	"errors"
-	"io/ioutil"
+	"os"
 	"strings"
 	"time"
 	"unsafe"
@@ -63,7 +63,7 @@
 // LoadCCache loads a credential cache file into a CCache type.
 func LoadCCache(cpath string) (*CCache, error) {
 	c := new(CCache)
-	b, err := ioutil.ReadFile(cpath)
+	b, err := os.ReadFile(cpath)
 	if err != nil {
 		return c, err
 	}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
index 5c2e9d7..cd1b8b1 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
@@ -8,7 +8,7 @@
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"os"
 	"strings"
 	"time"
 	"unsafe"
@@ -93,7 +93,7 @@
 		}
 	}
 	if len(key.KeyValue) < 1 {
-		return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype)
+		return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %q realm: %v kvno: %v etype: %v", princName.PrincipalNameString(), realm, kvno, etype)
 	}
 	return key, kv, nil
 }
@@ -170,7 +170,7 @@
 // Load a Keytab file into a Keytab type.
 func Load(ktPath string) (*Keytab, error) {
 	kt := new(Keytab)
-	b, err := ioutil.ReadFile(ktPath)
+	b, err := os.ReadFile(ktPath)
 	if err != nil {
 		return kt, err
 	}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
index 1fdba78..115a02a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
@@ -43,7 +43,7 @@
 		Cksum:     Checksum{},
 		Cusec:     int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
 		CTime:     t,
-		SeqNumber: seq.Int64(),
+		SeqNumber: seq.Int64() & 0x3fffffff,
 	}, nil
 }
 
@@ -53,7 +53,7 @@
 	if err != nil {
 		return err
 	}
-	a.SeqNumber = seq.Int64()
+	a.SeqNumber = seq.Int64() & 0x3fffffff
 	//Generate subkey value
 	sk := make([]byte, keySize, keySize)
 	rand.Read(sk)
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
index d645695..b53b91d 100644
--- a/vendor/github.com/jhump/protoreflect/LICENSE
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -187,7 +187,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2017 Joshua Humphries
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/jhump/protoreflect/codec/codec.go b/vendor/github.com/jhump/protoreflect/codec/codec.go
index b6f4ed0..7e5c568 100644
--- a/vendor/github.com/jhump/protoreflect/codec/codec.go
+++ b/vendor/github.com/jhump/protoreflect/codec/codec.go
@@ -4,6 +4,7 @@
 	"io"
 
 	"github.com/golang/protobuf/proto"
+
 	"github.com/jhump/protoreflect/internal/codec"
 )
 
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
index 02f8a32..0edb817 100644
--- a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
+++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
@@ -7,32 +7,32 @@
 	"math"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc"
 )
 
-var varintTypes = map[descriptor.FieldDescriptorProto_Type]bool{}
-var fixed32Types = map[descriptor.FieldDescriptorProto_Type]bool{}
-var fixed64Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+var varintTypes = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptorpb.FieldDescriptorProto_Type]bool{}
 
 func init() {
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_BOOL] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT32] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT64] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT32] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT64] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT32] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT64] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_ENUM] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_BOOL] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT32] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT64] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT32] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT64] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT32] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT64] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_ENUM] = true
 
-	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FIXED32] = true
-	fixed32Types[descriptor.FieldDescriptorProto_TYPE_SFIXED32] = true
-	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FLOAT] = true
+	fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED32] = true
+	fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED32] = true
+	fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FLOAT] = true
 
-	fixed64Types[descriptor.FieldDescriptorProto_TYPE_FIXED64] = true
-	fixed64Types[descriptor.FieldDescriptorProto_TYPE_SFIXED64] = true
-	fixed64Types[descriptor.FieldDescriptorProto_TYPE_DOUBLE] = true
+	fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED64] = true
+	fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED64] = true
+	fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_DOUBLE] = true
 }
 
 // ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type
@@ -117,53 +117,53 @@
 // messages, bytes, and strings), an error is returned.
 func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
 	switch fd.GetType() {
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		return v != 0, nil
-	case descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		if v > math.MaxUint32 {
 			return nil, ErrOverflow
 		}
 		return uint32(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		s := int64(v)
 		if s > math.MaxInt32 || s < math.MinInt32 {
 			return nil, ErrOverflow
 		}
 		return int32(s), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		if v > math.MaxUint32 {
 			return nil, ErrOverflow
 		}
 		return int32(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
 		if v > math.MaxUint32 {
 			return nil, ErrOverflow
 		}
 		return DecodeZigZag32(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT64,
-		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		return v, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		return int64(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
 		return DecodeZigZag64(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		if v > math.MaxUint32 {
 			return nil, ErrOverflow
 		}
 		return math.Float32frombits(uint32(v)), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return math.Float64frombits(v), nil
 
 	default:
@@ -184,14 +184,14 @@
 // contains multiple values, only the last value is returned.
 func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) {
 	switch {
-	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return bytes, nil
 
-	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING:
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return string(bytes), nil
 
-	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE ||
-		fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE ||
+		fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 		msg := mf.NewMessage(fd.GetMessageType())
 		err := proto.Unmarshal(bytes, msg)
 		if err != nil {
@@ -263,7 +263,7 @@
 		}
 
 	case proto.WireBytes:
-		alloc := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES
+		alloc := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES
 		var raw []byte
 		raw, err = b.DecodeRawBytes(alloc)
 		if err == nil {
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
index 499aa95..280f730 100644
--- a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
+++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
@@ -7,7 +7,7 @@
 	"sort"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc"
 )
@@ -175,74 +175,74 @@
 
 func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
 	switch fd.GetType() {
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		v := val.(bool)
 		if v {
 			return b.EncodeVarint(1)
 		}
 		return b.EncodeVarint(0)
 
-	case descriptor.FieldDescriptorProto_TYPE_ENUM,
-		descriptor.FieldDescriptorProto_TYPE_INT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32:
 		v := val.(int32)
 		return b.EncodeVarint(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		v := val.(int32)
 		return b.EncodeFixed32(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
 		v := val.(int32)
 		return b.EncodeVarint(EncodeZigZag32(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32:
 		v := val.(uint32)
 		return b.EncodeVarint(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		v := val.(uint32)
 		return b.EncodeFixed32(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_INT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64:
 		v := val.(int64)
 		return b.EncodeVarint(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		v := val.(int64)
 		return b.EncodeFixed64(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
 		v := val.(int64)
 		return b.EncodeVarint(EncodeZigZag64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64:
 		v := val.(uint64)
 		return b.EncodeVarint(v)
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		v := val.(uint64)
 		return b.EncodeFixed64(v)
 
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		v := val.(float64)
 		return b.EncodeFixed64(math.Float64bits(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		v := val.(float32)
 		return b.EncodeFixed32(uint64(math.Float32bits(v)))
 
-	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		v := val.([]byte)
 		return b.EncodeRawBytes(v)
 
-	case descriptor.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		v := val.(string)
 		return b.EncodeRawBytes(([]byte)(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
 		return b.EncodeDelimitedMessage(val.(proto.Message))
 
-	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 		// just append the nested message to this buffer
 		return b.EncodeMessage(val.(proto.Message))
 		// whosoever writeth start-group tag (e.g. caller) is responsible for writing end-group tag
@@ -252,34 +252,34 @@
 	}
 }
 
-func getWireType(t descriptor.FieldDescriptorProto_Type) (int8, error) {
+func getWireType(t descriptorpb.FieldDescriptorProto_Type) (int8, error) {
 	switch t {
-	case descriptor.FieldDescriptorProto_TYPE_ENUM,
-		descriptor.FieldDescriptorProto_TYPE_BOOL,
-		descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SINT64,
-		descriptor.FieldDescriptorProto_TYPE_UINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
+		descriptorpb.FieldDescriptorProto_TYPE_BOOL,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
 		return proto.WireVarint, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
-		descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		return proto.WireFixed32, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
-		descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return proto.WireFixed64, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_BYTES,
-		descriptor.FieldDescriptorProto_TYPE_STRING,
-		descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES,
+		descriptorpb.FieldDescriptorProto_TYPE_STRING,
+		descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
 		return proto.WireBytes, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 		return proto.WireStartGroup, nil
 
 	default:
diff --git a/vendor/github.com/jhump/protoreflect/desc/cache.go b/vendor/github.com/jhump/protoreflect/desc/cache.go
new file mode 100644
index 0000000..418632b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/cache.go
@@ -0,0 +1,48 @@
+package desc
+
+import (
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type descriptorCache interface {
+	get(protoreflect.Descriptor) Descriptor
+	put(protoreflect.Descriptor, Descriptor)
+}
+
+type lockingCache struct {
+	cacheMu sync.RWMutex
+	cache   mapCache
+}
+
+func (c *lockingCache) get(d protoreflect.Descriptor) Descriptor {
+	c.cacheMu.RLock()
+	defer c.cacheMu.RUnlock()
+	return c.cache.get(d)
+}
+
+func (c *lockingCache) put(key protoreflect.Descriptor, val Descriptor) {
+	c.cacheMu.Lock()
+	defer c.cacheMu.Unlock()
+	c.cache.put(key, val)
+}
+
+func (c *lockingCache) withLock(fn func(descriptorCache)) {
+	c.cacheMu.Lock()
+	defer c.cacheMu.Unlock()
+	// Pass the underlying mapCache. We don't want fn to use
+	// c.get or c.put since we already have the lock. So those
+	// methods would try to re-acquire and then deadlock!
+	fn(c.cache)
+}
+
+type mapCache map[protoreflect.Descriptor]Descriptor
+
+func (c mapCache) get(d protoreflect.Descriptor) Descriptor {
+	return c[d]
+}
+
+func (c mapCache) put(key protoreflect.Descriptor, val Descriptor) {
+	c[key] = val
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
index 538820c..01a6e9e 100644
--- a/vendor/github.com/jhump/protoreflect/desc/convert.go
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -5,7 +5,10 @@
 	"fmt"
 	"strings"
 
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc/internal"
 	intn "github.com/jhump/protoreflect/internal"
@@ -15,34 +18,87 @@
 // The file's direct dependencies must be provided. If the given dependencies do not include
 // all of the file's dependencies or if the contents of the descriptors are internally
 // inconsistent (e.g. contain unresolvable symbols) then an error is returned.
-func CreateFileDescriptor(fd *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+func CreateFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
 	return createFileDescriptor(fd, deps, nil)
 }
 
-func createFileDescriptor(fd *dpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+type descResolver struct {
+	files          []*FileDescriptor
+	importResolver *ImportResolver
+	fromPath       string
+}
+
+func (r *descResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+	resolvedPath := r.importResolver.ResolveImport(r.fromPath, path)
+	d := r.findFileByPath(resolvedPath)
+	if d != nil {
+		return d, nil
+	}
+	if resolvedPath != path {
+		d := r.findFileByPath(path)
+		if d != nil {
+			return d, nil
+		}
+	}
+	return nil, protoregistry.NotFound
+}
+
+func (r *descResolver) findFileByPath(path string) protoreflect.FileDescriptor {
+	for _, fd := range r.files {
+		if fd.GetName() == path {
+			return fd.UnwrapFile()
+		}
+	}
+	return nil
+}
+
+func (r *descResolver) FindDescriptorByName(n protoreflect.FullName) (protoreflect.Descriptor, error) {
+	for _, fd := range r.files {
+		d := fd.FindSymbol(string(n))
+		if d != nil {
+			return d.(DescriptorWrapper).Unwrap(), nil
+		}
+	}
+	return nil, protoregistry.NotFound
+}
+
+func createFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+	dr := &descResolver{files: deps, importResolver: r, fromPath: fd.GetName()}
+	d, err := protodesc.NewFile(fd, dr)
+	if err != nil {
+		return nil, err
+	}
+
+	// make sure cache has dependencies populated
+	cache := mapCache{}
+	for _, dep := range deps {
+		fd, err := dr.FindFileByPath(dep.GetName())
+		if err != nil {
+			return nil, err
+		}
+		cache.put(fd, dep)
+	}
+
+	return convertFile(d, fd, cache)
+}
+
+func convertFile(d protoreflect.FileDescriptor, fd *descriptorpb.FileDescriptorProto, cache descriptorCache) (*FileDescriptor, error) {
 	ret := &FileDescriptor{
+		wrapped:    d,
 		proto:      fd,
 		symbols:    map[string]Descriptor{},
 		fieldIndex: map[string]map[int32]*FieldDescriptor{},
 	}
-	pkg := fd.GetPackage()
+	cache.put(d, ret)
 
 	// populate references to file descriptor dependencies
-	files := map[string]*FileDescriptor{}
-	for _, f := range deps {
-		files[f.proto.GetName()] = f
-	}
 	ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
-	for i, d := range fd.GetDependency() {
-		resolved := r.ResolveImport(fd.GetName(), d)
-		ret.deps[i] = files[resolved]
-		if ret.deps[i] == nil {
-			if resolved != d {
-				ret.deps[i] = files[d]
-			}
-			if ret.deps[i] == nil {
-				return nil, intn.ErrNoSuchFile(d)
-			}
+	for i := 0; i < d.Imports().Len(); i++ {
+		f := d.Imports().Get(i).FileDescriptor
+		if c, err := wrapFile(f, cache); err != nil {
+			return nil, err
+		} else {
+			ret.deps[i] = c
 		}
 	}
 	ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
@@ -53,27 +109,39 @@
 	for i, wd := range fd.GetWeakDependency() {
 		ret.weakDeps[i] = ret.deps[wd]
 	}
-	ret.isProto3 = fd.GetSyntax() == "proto3"
 
 	// populate all tables of child descriptors
-	for _, m := range fd.GetMessageType() {
-		md, n := createMessageDescriptor(ret, ret, pkg, m, ret.symbols)
-		ret.symbols[n] = md
+	path := make([]int32, 1, 8)
+	path[0] = internal.File_messagesTag
+	for i := 0; i < d.Messages().Len(); i++ {
+		src := d.Messages().Get(i)
+		srcProto := fd.GetMessageType()[src.Index()]
+		md := createMessageDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i)))
+		ret.symbols[string(src.FullName())] = md
 		ret.messages = append(ret.messages, md)
 	}
-	for _, e := range fd.GetEnumType() {
-		ed, n := createEnumDescriptor(ret, ret, pkg, e, ret.symbols)
-		ret.symbols[n] = ed
+	path[0] = internal.File_enumsTag
+	for i := 0; i < d.Enums().Len(); i++ {
+		src := d.Enums().Get(i)
+		srcProto := fd.GetEnumType()[src.Index()]
+		ed := createEnumDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i)))
+		ret.symbols[string(src.FullName())] = ed
 		ret.enums = append(ret.enums, ed)
 	}
-	for _, ex := range fd.GetExtension() {
-		exd, n := createFieldDescriptor(ret, ret, pkg, ex)
-		ret.symbols[n] = exd
+	path[0] = internal.File_extensionsTag
+	for i := 0; i < d.Extensions().Len(); i++ {
+		src := d.Extensions().Get(i)
+		srcProto := fd.GetExtension()[src.Index()]
+		exd := createFieldDescriptor(ret, ret, src, srcProto, cache, append(path, int32(i)))
+		ret.symbols[string(src.FullName())] = exd
 		ret.extensions = append(ret.extensions, exd)
 	}
-	for _, s := range fd.GetService() {
-		sd, n := createServiceDescriptor(ret, pkg, s, ret.symbols)
-		ret.symbols[n] = sd
+	path[0] = internal.File_servicesTag
+	for i := 0; i < d.Services().Len(); i++ {
+		src := d.Services().Get(i)
+		srcProto := fd.GetService()[src.Index()]
+		sd := createServiceDescriptor(ret, src, srcProto, ret.symbols, append(path, int32(i)))
+		ret.symbols[string(src.FullName())] = sd
 		ret.services = append(ret.services, sd)
 	}
 
@@ -81,27 +149,20 @@
 	ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
 
 	// now we can resolve all type references and source code info
-	scopes := []scope{fileScope(ret)}
-	path := make([]int32, 1, 8)
-	path[0] = internal.File_messagesTag
-	for i, md := range ret.messages {
-		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, md := range ret.messages {
+		if err := md.resolve(cache); err != nil {
 			return nil, err
 		}
 	}
-	path[0] = internal.File_enumsTag
-	for i, ed := range ret.enums {
-		ed.resolve(append(path, int32(i)))
-	}
 	path[0] = internal.File_extensionsTag
-	for i, exd := range ret.extensions {
-		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, exd := range ret.extensions {
+		if err := exd.resolve(cache); err != nil {
 			return nil, err
 		}
 	}
 	path[0] = internal.File_servicesTag
-	for i, sd := range ret.services {
-		if err := sd.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, sd := range ret.services {
+		if err := sd.resolve(cache); err != nil {
 			return nil, err
 		}
 	}
@@ -112,15 +173,15 @@
 // CreateFileDescriptors constructs a set of descriptors, one for each of the
 // given descriptor protos. The given set of descriptor protos must include all
 // transitive dependencies for every file.
-func CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+func CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
 	return createFileDescriptors(fds, nil)
 }
 
-func createFileDescriptors(fds []*dpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+func createFileDescriptors(fds []*descriptorpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
 	if len(fds) == 0 {
 		return nil, nil
 	}
-	files := map[string]*dpb.FileDescriptorProto{}
+	files := map[string]*descriptorpb.FileDescriptorProto{}
 	resolved := map[string]*FileDescriptor{}
 	var name string
 	for _, fd := range fds {
@@ -139,13 +200,13 @@
 // ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
 // file descriptors and their transitive dependencies. The files are topologically sorted
 // so that a file will always appear after its dependencies.
-func ToFileDescriptorSet(fds ...*FileDescriptor) *dpb.FileDescriptorSet {
-	var fdps []*dpb.FileDescriptorProto
+func ToFileDescriptorSet(fds ...*FileDescriptor) *descriptorpb.FileDescriptorSet {
+	var fdps []*descriptorpb.FileDescriptorProto
 	addAllFiles(fds, &fdps, map[string]struct{}{})
-	return &dpb.FileDescriptorSet{File: fdps}
+	return &descriptorpb.FileDescriptorSet{File: fdps}
 }
 
-func addAllFiles(src []*FileDescriptor, results *[]*dpb.FileDescriptorProto, seen map[string]struct{}) {
+func addAllFiles(src []*FileDescriptor, results *[]*descriptorpb.FileDescriptorProto, seen map[string]struct{}) {
 	for _, fd := range src {
 		if _, ok := seen[fd.GetName()]; ok {
 			continue
@@ -160,12 +221,13 @@
 // set's *last* file will be the returned descriptor. The set's remaining files must comprise
 // the full set of transitive dependencies of that last file. This is the same format and
 // order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
-//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
-func CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+//
+//	protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
 	return createFileDescriptorFromSet(fds, nil)
 }
 
-func createFileDescriptorFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+func createFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
 	result, err := createFileDescriptorsFromSet(fds, r)
 	if err != nil {
 		return nil, err
@@ -180,12 +242,13 @@
 // full set of transitive dependencies for all files therein or else a link error will occur
 // and be returned instead of the slice of descriptors. This is the same format used by
 // protoc when a FileDescriptorSet file with an invocation like so:
-//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
-func CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+//
+//	protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
 	return createFileDescriptorsFromSet(fds, nil)
 }
 
-func createFileDescriptorsFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
+func createFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
 	files := fds.GetFile()
 	if len(files) == 0 {
 		return nil, errors.New("file descriptor set is empty")
@@ -195,7 +258,7 @@
 
 // createFromSet creates a descriptor for the given filename. It recursively
 // creates descriptors for the given file's dependencies.
-func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*dpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*descriptorpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
 	for _, s := range seen {
 		if filename == s {
 			return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
index 42f0f8e..68eb252 100644
--- a/vendor/github.com/jhump/protoreflect/desc/descriptor.go
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -5,12 +5,12 @@
 	"fmt"
 	"sort"
 	"strconv"
-	"strings"
 	"unicode"
 	"unicode/utf8"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc/internal"
 )
@@ -40,7 +40,7 @@
 	// descriptor. Source code info is optional. If no source code info is available for
 	// the element (including if there is none at all in the file descriptor) then this
 	// returns nil
-	GetSourceInfo() *dpb.SourceCodeInfo_Location
+	GetSourceInfo() *descriptorpb.SourceCodeInfo_Location
 	// AsProto returns the underlying descriptor proto for this descriptor.
 	AsProto() proto.Message
 }
@@ -49,7 +49,8 @@
 
 // FileDescriptor describes a proto source file.
 type FileDescriptor struct {
-	proto      *dpb.FileDescriptorProto
+	wrapped    protoreflect.FileDescriptor
+	proto      *descriptorpb.FileDescriptorProto
 	symbols    map[string]Descriptor
 	deps       []*FileDescriptor
 	publicDeps []*FileDescriptor
@@ -59,11 +60,22 @@
 	extensions []*FieldDescriptor
 	services   []*ServiceDescriptor
 	fieldIndex map[string]map[int32]*FieldDescriptor
-	isProto3   bool
 	sourceInfo internal.SourceInfoMap
 	sourceInfoRecomputeFunc
 }
 
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapFile, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FileDescriptor) Unwrap() protoreflect.Descriptor {
+	return fd.wrapped
+}
+
+// UnwrapFile returns the underlying protoreflect.FileDescriptor.
+func (fd *FileDescriptor) UnwrapFile() protoreflect.FileDescriptor {
+	return fd.wrapped
+}
+
 func (fd *FileDescriptor) recomputeSourceInfo() {
 	internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
 }
@@ -81,18 +93,18 @@
 // to compile it, possibly including path (relative to a directory in the proto
 // import path).
 func (fd *FileDescriptor) GetName() string {
-	return fd.proto.GetName()
+	return fd.wrapped.Path()
 }
 
 // GetFullyQualifiedName returns the name of the file, same as GetName. It is
 // present to satisfy the Descriptor interface.
 func (fd *FileDescriptor) GetFullyQualifiedName() string {
-	return fd.proto.GetName()
+	return fd.wrapped.Path()
 }
 
 // GetPackage returns the name of the package declared in the file.
 func (fd *FileDescriptor) GetPackage() string {
-	return fd.proto.GetPackage()
+	return string(fd.wrapped.Package())
 }
 
 // GetParent always returns nil: files are the root of descriptor hierarchies.
@@ -115,13 +127,13 @@
 }
 
 // GetFileOptions returns the file's options.
-func (fd *FileDescriptor) GetFileOptions() *dpb.FileOptions {
+func (fd *FileDescriptor) GetFileOptions() *descriptorpb.FileOptions {
 	return fd.proto.GetOptions()
 }
 
 // GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
 // interface.
-func (fd *FileDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (fd *FileDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return nil
 }
 
@@ -133,7 +145,7 @@
 }
 
 // AsFileDescriptorProto returns the underlying descriptor proto.
-func (fd *FileDescriptor) AsFileDescriptorProto() *dpb.FileDescriptorProto {
+func (fd *FileDescriptor) AsFileDescriptorProto() *descriptorpb.FileDescriptorProto {
 	return fd.proto
 }
 
@@ -143,8 +155,20 @@
 }
 
 // IsProto3 returns true if the file declares a syntax of "proto3".
+//
+// When this returns false, the file either uses "proto2" syntax (if
+// Edition() returns zero) or uses editions.
 func (fd *FileDescriptor) IsProto3() bool {
-	return fd.isProto3
+	return fd.wrapped.Syntax() == protoreflect.Proto3
+}
+
+// Edition returns the edition of the file. If the file does not
+// use editions syntax, zero is returned.
+func (fd *FileDescriptor) Edition() descriptorpb.Edition {
+	if fd.wrapped.Syntax() == protoreflect.Editions {
+		return fd.proto.GetEdition()
+	}
+	return 0
 }
 
 // GetDependencies returns all of this file's dependencies. These correspond to
@@ -189,6 +213,9 @@
 // element with the given fully-qualified symbol name. If no such element
 // exists then this method returns nil.
 func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+	if len(symbol) == 0 {
+		return nil
+	}
 	if symbol[0] == '.' {
 		symbol = symbol[1:]
 	}
@@ -259,7 +286,8 @@
 
 // MessageDescriptor describes a protocol buffer message.
 type MessageDescriptor struct {
-	proto          *dpb.DescriptorProto
+	wrapped        protoreflect.MessageDescriptor
+	proto          *descriptorpb.DescriptorProto
 	parent         Descriptor
 	file           *FileDescriptor
 	fields         []*FieldDescriptor
@@ -268,42 +296,72 @@
 	extensions     []*FieldDescriptor
 	oneOfs         []*OneOfDescriptor
 	extRanges      extRanges
-	fqn            string
 	sourceInfoPath []int32
 	jsonNames      jsonNameMap
-	isProto3       bool
-	isMapEntry     bool
 }
 
-func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, md *dpb.DescriptorProto, symbols map[string]Descriptor) (*MessageDescriptor, string) {
-	msgName := merge(enclosing, md.GetName())
-	ret := &MessageDescriptor{proto: md, parent: parent, file: fd, fqn: msgName}
-	for _, f := range md.GetField() {
-		fld, n := createFieldDescriptor(fd, ret, msgName, f)
-		symbols[n] = fld
-		ret.fields = append(ret.fields, fld)
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapMessage, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (md *MessageDescriptor) Unwrap() protoreflect.Descriptor {
+	return md.wrapped
+}
+
+// UnwrapMessage returns the underlying protoreflect.MessageDescriptor.
+func (md *MessageDescriptor) UnwrapMessage() protoreflect.MessageDescriptor {
+	return md.wrapped
+}
+
+func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, md protoreflect.MessageDescriptor, mdp *descriptorpb.DescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *MessageDescriptor {
+	ret := &MessageDescriptor{
+		wrapped:        md,
+		proto:          mdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
 	}
-	for _, nm := range md.NestedType {
-		nmd, n := createMessageDescriptor(fd, ret, msgName, nm, symbols)
-		symbols[n] = nmd
+	cache.put(md, ret)
+	path = append(path, internal.Message_nestedMessagesTag)
+	for i := 0; i < md.Messages().Len(); i++ {
+		src := md.Messages().Get(i)
+		srcProto := mdp.GetNestedType()[src.Index()]
+		nmd := createMessageDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = nmd
 		ret.nested = append(ret.nested, nmd)
 	}
-	for _, e := range md.EnumType {
-		ed, n := createEnumDescriptor(fd, ret, msgName, e, symbols)
-		symbols[n] = ed
+	path[len(path)-1] = internal.Message_enumsTag
+	for i := 0; i < md.Enums().Len(); i++ {
+		src := md.Enums().Get(i)
+		srcProto := mdp.GetEnumType()[src.Index()]
+		ed := createEnumDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = ed
 		ret.enums = append(ret.enums, ed)
 	}
-	for _, ex := range md.GetExtension() {
-		exd, n := createFieldDescriptor(fd, ret, msgName, ex)
-		symbols[n] = exd
+	path[len(path)-1] = internal.Message_fieldsTag
+	for i := 0; i < md.Fields().Len(); i++ {
+		src := md.Fields().Get(i)
+		srcProto := mdp.GetField()[src.Index()]
+		fld := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = fld
+		ret.fields = append(ret.fields, fld)
+	}
+	path[len(path)-1] = internal.Message_extensionsTag
+	for i := 0; i < md.Extensions().Len(); i++ {
+		src := md.Extensions().Get(i)
+		srcProto := mdp.GetExtension()[src.Index()]
+		exd := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = exd
 		ret.extensions = append(ret.extensions, exd)
 	}
-	for i, o := range md.GetOneofDecl() {
-		od, n := createOneOfDescriptor(fd, ret, i, msgName, o)
-		symbols[n] = od
+	path[len(path)-1] = internal.Message_oneOfsTag
+	for i := 0; i < md.Oneofs().Len(); i++ {
+		src := md.Oneofs().Get(i)
+		srcProto := mdp.GetOneofDecl()[src.Index()]
+		od := createOneOfDescriptor(fd, ret, i, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = od
 		ret.oneOfs = append(ret.oneOfs, od)
 	}
-	for _, r := range md.GetExtensionRange() {
+	for _, r := range mdp.GetExtensionRange() {
 		// proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code).
 		// but protoc converts range to exclusive end in descriptor, so we must convert back
 		end := r.GetEnd() - 1
@@ -312,57 +370,39 @@
 			End:   end})
 	}
 	sort.Sort(ret.extRanges)
-	ret.isProto3 = fd.isProto3
-	ret.isMapEntry = md.GetOptions().GetMapEntry() &&
-		len(ret.fields) == 2 &&
-		ret.fields[0].GetNumber() == 1 &&
-		ret.fields[1].GetNumber() == 2
 
-	return ret, msgName
+	return ret
 }
 
-func (md *MessageDescriptor) resolve(path []int32, scopes []scope) error {
-	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
-	path = append(path, internal.Message_nestedMessagesTag)
-	scopes = append(scopes, messageScope(md))
-	for i, nmd := range md.nested {
-		if err := nmd.resolve(append(path, int32(i)), scopes); err != nil {
+func (md *MessageDescriptor) resolve(cache descriptorCache) error {
+	for _, nmd := range md.nested {
+		if err := nmd.resolve(cache); err != nil {
 			return err
 		}
 	}
-	path[len(path)-1] = internal.Message_enumsTag
-	for i, ed := range md.enums {
-		ed.resolve(append(path, int32(i)))
-	}
-	path[len(path)-1] = internal.Message_fieldsTag
-	for i, fld := range md.fields {
-		if err := fld.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, fld := range md.fields {
+		if err := fld.resolve(cache); err != nil {
 			return err
 		}
 	}
-	path[len(path)-1] = internal.Message_extensionsTag
-	for i, exd := range md.extensions {
-		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, exd := range md.extensions {
+		if err := exd.resolve(cache); err != nil {
 			return err
 		}
 	}
-	path[len(path)-1] = internal.Message_oneOfsTag
-	for i, od := range md.oneOfs {
-		od.resolve(append(path, int32(i)))
-	}
 	return nil
 }
 
 // GetName returns the simple (unqualified) name of the message.
 func (md *MessageDescriptor) GetName() string {
-	return md.proto.GetName()
+	return string(md.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the message. This
 // includes the package name (if there is one) as well as the names of any
 // enclosing messages.
 func (md *MessageDescriptor) GetFullyQualifiedName() string {
-	return md.fqn
+	return string(md.wrapped.FullName())
 }
 
 // GetParent returns the message's enclosing descriptor. For top-level messages,
@@ -385,7 +425,7 @@
 }
 
 // GetMessageOptions returns the message's options.
-func (md *MessageDescriptor) GetMessageOptions() *dpb.MessageOptions {
+func (md *MessageDescriptor) GetMessageOptions() *descriptorpb.MessageOptions {
 	return md.proto.GetOptions()
 }
 
@@ -394,7 +434,7 @@
 // returned info contains information about the location in the file where the
 // message was defined and also contains comments associated with the message
 // definition.
-func (md *MessageDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (md *MessageDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return md.file.sourceInfo.Get(md.sourceInfoPath)
 }
 
@@ -406,7 +446,7 @@
 }
 
 // AsDescriptorProto returns the underlying descriptor proto.
-func (md *MessageDescriptor) AsDescriptorProto() *dpb.DescriptorProto {
+func (md *MessageDescriptor) AsDescriptorProto() *descriptorpb.DescriptorProto {
 	return md.proto
 }
 
@@ -418,7 +458,7 @@
 // IsMapEntry returns true if this is a synthetic message type that represents an entry
 // in a map field.
 func (md *MessageDescriptor) IsMapEntry() bool {
-	return md.isMapEntry
+	return md.wrapped.IsMapEntry()
 }
 
 // GetFields returns all of the fields for this message.
@@ -448,7 +488,7 @@
 
 // IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
 func (md *MessageDescriptor) IsProto3() bool {
-	return md.isProto3
+	return md.file.IsProto3()
 }
 
 // GetExtensionRanges returns the ranges of extension field numbers for this message.
@@ -503,7 +543,7 @@
 // FindFieldByName finds the field with the given name. If no such field exists
 // then nil is returned. Only regular fields are returned, not extensions.
 func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
-	fqn := fmt.Sprintf("%s.%s", md.fqn, fieldName)
+	fqn := md.GetFullyQualifiedName() + "." + fieldName
 	if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
 		return fd
 	} else {
@@ -514,7 +554,7 @@
 // FindFieldByNumber finds the field with the given tag number. If no such field
 // exists then nil is returned. Only regular fields are returned, not extensions.
 func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
-	if fd, ok := md.file.fieldIndex[md.fqn][tagNumber]; ok && !fd.IsExtension() {
+	if fd, ok := md.file.fieldIndex[md.GetFullyQualifiedName()][tagNumber]; ok && !fd.IsExtension() {
 		return fd
 	} else {
 		return nil
@@ -523,59 +563,110 @@
 
 // FieldDescriptor describes a field of a protocol buffer message.
 type FieldDescriptor struct {
-	proto          *dpb.FieldDescriptorProto
+	wrapped        protoreflect.FieldDescriptor
+	proto          *descriptorpb.FieldDescriptorProto
 	parent         Descriptor
 	owner          *MessageDescriptor
 	file           *FileDescriptor
 	oneOf          *OneOfDescriptor
 	msgType        *MessageDescriptor
 	enumType       *EnumDescriptor
-	fqn            string
 	sourceInfoPath []int32
 	def            memoizedDefault
-	isMap          bool
 }
 
-func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, fld *dpb.FieldDescriptorProto) (*FieldDescriptor, string) {
-	fldName := merge(enclosing, fld.GetName())
-	ret := &FieldDescriptor{proto: fld, parent: parent, file: fd, fqn: fldName}
-	if fld.GetExtendee() == "" {
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapField, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FieldDescriptor) Unwrap() protoreflect.Descriptor {
+	return fd.wrapped
+}
+
+// UnwrapField returns the underlying protoreflect.FieldDescriptor.
+func (fd *FieldDescriptor) UnwrapField() protoreflect.FieldDescriptor {
+	return fd.wrapped
+}
+
+func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, fld protoreflect.FieldDescriptor, fldp *descriptorpb.FieldDescriptorProto, cache descriptorCache, path []int32) *FieldDescriptor {
+	ret := &FieldDescriptor{
+		wrapped:        fld,
+		proto:          fldp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	cache.put(fld, ret)
+	if !fld.IsExtension() {
 		ret.owner = parent.(*MessageDescriptor)
 	}
 	// owner for extensions, field type (be it message or enum), and one-ofs get resolved later
-	return ret, fldName
+	return ret
 }
 
-func (fd *FieldDescriptor) resolve(path []int32, scopes []scope) error {
+func descriptorType(d Descriptor) string {
+	switch d := d.(type) {
+	case *FileDescriptor:
+		return "a file"
+	case *MessageDescriptor:
+		return "a message"
+	case *FieldDescriptor:
+		if d.IsExtension() {
+			return "an extension"
+		}
+		return "a field"
+	case *OneOfDescriptor:
+		return "a oneof"
+	case *EnumDescriptor:
+		return "an enum"
+	case *EnumValueDescriptor:
+		return "an enum value"
+	case *ServiceDescriptor:
+		return "a service"
+	case *MethodDescriptor:
+		return "a method"
+	default:
+		return fmt.Sprintf("a %T", d)
+	}
+}
+
+func (fd *FieldDescriptor) resolve(cache descriptorCache) error {
 	if fd.proto.OneofIndex != nil && fd.oneOf == nil {
-		return fmt.Errorf("could not link field %s to one-of index %d", fd.fqn, *fd.proto.OneofIndex)
+		return fmt.Errorf("could not link field %s to one-of index %d", fd.GetFullyQualifiedName(), *fd.proto.OneofIndex)
 	}
-	fd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
-	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_ENUM {
-		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+	if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM {
+		desc, err := resolve(fd.file, fd.wrapped.Enum(), cache)
+		if err != nil {
 			return err
-		} else {
-			fd.enumType = desc.(*EnumDescriptor)
 		}
+		enumType, ok := desc.(*EnumDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v indicates a type of enum, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc))
+		}
+		fd.enumType = enumType
 	}
-	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
-		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+	if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+		desc, err := resolve(fd.file, fd.wrapped.Message(), cache)
+		if err != nil {
 			return err
-		} else {
-			fd.msgType = desc.(*MessageDescriptor)
 		}
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v indicates a type of message, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc))
+		}
+		fd.msgType = msgType
 	}
-	if fd.proto.GetExtendee() != "" {
-		if desc, err := resolve(fd.file, fd.proto.GetExtendee(), scopes); err != nil {
+	if fd.IsExtension() {
+		desc, err := resolve(fd.file, fd.wrapped.ContainingMessage(), cache)
+		if err != nil {
 			return err
-		} else {
-			fd.owner = desc.(*MessageDescriptor)
 		}
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v extends %q which should be a message but is %s", fd.GetFullyQualifiedName(), fd.proto.GetExtendee(), descriptorType(desc))
+		}
+		fd.owner = msgType
 	}
 	fd.file.registerField(fd)
-	fd.isMap = fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED &&
-		fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE &&
-		fd.GetMessageType().IsMapEntry()
 	return nil
 }
 
@@ -588,7 +679,7 @@
 		return nil
 	}
 
-	proto3 := fd.file.isProto3
+	proto3 := fd.file.IsProto3()
 	if !proto3 {
 		def := fd.AsFieldDescriptorProto().GetDefaultValue()
 		if def != "" {
@@ -601,31 +692,31 @@
 	}
 
 	switch fd.GetType() {
-	case dpb.FieldDescriptorProto_TYPE_FIXED32,
-		dpb.FieldDescriptorProto_TYPE_UINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32:
 		return uint32(0)
-	case dpb.FieldDescriptorProto_TYPE_SFIXED32,
-		dpb.FieldDescriptorProto_TYPE_INT32,
-		dpb.FieldDescriptorProto_TYPE_SINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32:
 		return int32(0)
-	case dpb.FieldDescriptorProto_TYPE_FIXED64,
-		dpb.FieldDescriptorProto_TYPE_UINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
 		return uint64(0)
-	case dpb.FieldDescriptorProto_TYPE_SFIXED64,
-		dpb.FieldDescriptorProto_TYPE_INT64,
-		dpb.FieldDescriptorProto_TYPE_SINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64:
 		return int64(0)
-	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		return float32(0.0)
-	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return float64(0.0)
-	case dpb.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		return false
-	case dpb.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return []byte(nil)
-	case dpb.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return ""
-	case dpb.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		if proto3 {
 			return int32(0)
 		}
@@ -642,60 +733,60 @@
 
 func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
 	switch fd.GetType() {
-	case dpb.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		vd := fd.GetEnumType().FindValueByName(val)
 		if vd != nil {
 			return vd.GetNumber()
 		}
 		return nil
-	case dpb.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		if val == "true" {
 			return true
 		} else if val == "false" {
 			return false
 		}
 		return nil
-	case dpb.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return []byte(unescape(val))
-	case dpb.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return val
-	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		if f, err := strconv.ParseFloat(val, 32); err == nil {
 			return float32(f)
 		} else {
 			return float32(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		if f, err := strconv.ParseFloat(val, 64); err == nil {
 			return f
 		} else {
 			return float64(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_INT32,
-		dpb.FieldDescriptorProto_TYPE_SINT32,
-		dpb.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		if i, err := strconv.ParseInt(val, 10, 32); err == nil {
 			return int32(i)
 		} else {
 			return int32(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_UINT32,
-		dpb.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		if i, err := strconv.ParseUint(val, 10, 32); err == nil {
 			return uint32(i)
 		} else {
 			return uint32(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_INT64,
-		dpb.FieldDescriptorProto_TYPE_SINT64,
-		dpb.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		if i, err := strconv.ParseInt(val, 10, 64); err == nil {
 			return i
 		} else {
 			return int64(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_UINT64,
-		dpb.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		if i, err := strconv.ParseUint(val, 10, 64); err == nil {
 			return i
 		} else {
@@ -827,12 +918,12 @@
 
 // GetName returns the name of the field.
 func (fd *FieldDescriptor) GetName() string {
-	return fd.proto.GetName()
+	return string(fd.wrapped.Name())
 }
 
 // GetNumber returns the tag number of this field.
 func (fd *FieldDescriptor) GetNumber() int32 {
-	return fd.proto.GetNumber()
+	return int32(fd.wrapped.Number())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the field. Unlike
@@ -846,7 +937,7 @@
 // If this field is part of a one-of, the fully qualified name does *not*
 // include the name of the one-of, only of the enclosing message.
 func (fd *FieldDescriptor) GetFullyQualifiedName() string {
-	return fd.fqn
+	return string(fd.wrapped.FullName())
 }
 
 // GetParent returns the fields's enclosing descriptor. For normal
@@ -871,7 +962,7 @@
 }
 
 // GetFieldOptions returns the field's options.
-func (fd *FieldDescriptor) GetFieldOptions() *dpb.FieldOptions {
+func (fd *FieldDescriptor) GetFieldOptions() *descriptorpb.FieldOptions {
 	return fd.proto.GetOptions()
 }
 
@@ -880,7 +971,7 @@
 // returned info contains information about the location in the file where the
 // field was defined and also contains comments associated with the field
 // definition.
-func (fd *FieldDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (fd *FieldDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return fd.file.sourceInfo.Get(fd.sourceInfoPath)
 }
 
@@ -892,7 +983,7 @@
 }
 
 // AsFieldDescriptorProto returns the underlying descriptor proto.
-func (fd *FieldDescriptor) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *descriptorpb.FieldDescriptorProto {
 	return fd.proto
 }
 
@@ -962,7 +1053,7 @@
 
 // IsExtension returns true if this is an extension field.
 func (fd *FieldDescriptor) IsExtension() bool {
-	return fd.proto.GetExtendee() != ""
+	return fd.wrapped.IsExtension()
 }
 
 // GetOneOf returns the one-of field set to which this field belongs. If this field
@@ -974,24 +1065,24 @@
 // GetType returns the type of this field. If the type indicates an enum, the
 // enum type can be queried via GetEnumType. If the type indicates a message, the
 // message type can be queried via GetMessageType.
-func (fd *FieldDescriptor) GetType() dpb.FieldDescriptorProto_Type {
+func (fd *FieldDescriptor) GetType() descriptorpb.FieldDescriptorProto_Type {
 	return fd.proto.GetType()
 }
 
 // GetLabel returns the label for this field. The label can be required (proto2-only),
 // optional (default for proto3), or repeated.
-func (fd *FieldDescriptor) GetLabel() dpb.FieldDescriptorProto_Label {
+func (fd *FieldDescriptor) GetLabel() descriptorpb.FieldDescriptorProto_Label {
 	return fd.proto.GetLabel()
 }
 
 // IsRequired returns true if this field has the "required" label.
 func (fd *FieldDescriptor) IsRequired() bool {
-	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED
+	return fd.wrapped.Cardinality() == protoreflect.Required
 }
 
 // IsRepeated returns true if this field has the "repeated" label.
 func (fd *FieldDescriptor) IsRepeated() bool {
-	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED
+	return fd.wrapped.Cardinality() == protoreflect.Repeated
 }
 
 // IsProto3Optional returns true if this field has an explicit "optional" label
@@ -999,30 +1090,27 @@
 // extensions), will be nested in synthetic oneofs that contain only the single
 // field.
 func (fd *FieldDescriptor) IsProto3Optional() bool {
-	return internal.GetProto3Optional(fd.proto)
+	return fd.proto.GetProto3Optional()
 }
 
 // HasPresence returns true if this field can distinguish when a value is
 // present or not. Scalar fields in "proto3" syntax files, for example, return
 // false since absent values are indistinguishable from zero values.
 func (fd *FieldDescriptor) HasPresence() bool {
-	if !fd.file.isProto3 {
-		return true
-	}
-	return fd.msgType != nil || fd.oneOf != nil
+	return fd.wrapped.HasPresence()
 }
 
 // IsMap returns true if this is a map field. If so, it will have the "repeated"
 // label and its type will be a message that represents a map entry. The map entry
 // message will have exactly two fields: tag #1 is the key and tag #2 is the value.
 func (fd *FieldDescriptor) IsMap() bool {
-	return fd.isMap
+	return fd.wrapped.IsMap()
 }
 
 // GetMapKeyType returns the type of the key field if this is a map field. If it is
 // not a map field, nil is returned.
 func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
-	if fd.isMap {
+	if fd.IsMap() {
 		return fd.msgType.FindFieldByNumber(int32(1))
 	}
 	return nil
@@ -1031,7 +1119,7 @@
 // GetMapValueType returns the type of the value field if this is a map field. If it
 // is not a map field, nil is returned.
 func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
-	if fd.isMap {
+	if fd.IsMap() {
 		return fd.msgType.FindFieldByNumber(int32(2))
 	}
 	return nil
@@ -1060,40 +1148,66 @@
 // Otherwise, it returns the declared default value for the field or a zero value, if no
 // default is declared or if the file is proto3. The type of said return value corresponds
 // to the type of the field:
-//  +-------------------------+-----------+
-//  |       Declared Type     |  Go Type  |
-//  +-------------------------+-----------+
-//  | int32, sint32, sfixed32 | int32     |
-//  | int64, sint64, sfixed64 | int64     |
-//  | uint32, fixed32         | uint32    |
-//  | uint64, fixed64         | uint64    |
-//  | float                   | float32   |
-//  | double                  | double32  |
-//  | bool                    | bool      |
-//  | string                  | string    |
-//  | bytes                   | []byte    |
-//  +-------------------------+-----------+
+//
+//	+-------------------------+-----------+
+//	|       Declared Type     |  Go Type  |
+//	+-------------------------+-----------+
+//	| int32, sint32, sfixed32 | int32     |
+//	| int64, sint64, sfixed64 | int64     |
+//	| uint32, fixed32         | uint32    |
+//	| uint64, fixed64         | uint64    |
+//	| float                   | float32   |
+//	| double                  | float64   |
+//	| bool                    | bool      |
+//	| string                  | string    |
+//	| bytes                   | []byte    |
+//	+-------------------------+-----------+
 func (fd *FieldDescriptor) GetDefaultValue() interface{} {
 	return fd.getDefaultValue()
 }
 
 // EnumDescriptor describes an enum declared in a proto file.
 type EnumDescriptor struct {
-	proto          *dpb.EnumDescriptorProto
+	wrapped        protoreflect.EnumDescriptor
+	proto          *descriptorpb.EnumDescriptorProto
 	parent         Descriptor
 	file           *FileDescriptor
 	values         []*EnumValueDescriptor
 	valuesByNum    sortedValues
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, ed *dpb.EnumDescriptorProto, symbols map[string]Descriptor) (*EnumDescriptor, string) {
-	enumName := merge(enclosing, ed.GetName())
-	ret := &EnumDescriptor{proto: ed, parent: parent, file: fd, fqn: enumName}
-	for _, ev := range ed.GetValue() {
-		evd, n := createEnumValueDescriptor(fd, ret, enumName, ev)
-		symbols[n] = evd
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnum, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (ed *EnumDescriptor) Unwrap() protoreflect.Descriptor {
+	return ed.wrapped
+}
+
+// UnwrapEnum returns the underlying protoreflect.EnumDescriptor.
+func (ed *EnumDescriptor) UnwrapEnum() protoreflect.EnumDescriptor {
+	return ed.wrapped
+}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, ed protoreflect.EnumDescriptor, edp *descriptorpb.EnumDescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *EnumDescriptor {
+	ret := &EnumDescriptor{
+		wrapped:        ed,
+		proto:          edp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	path = append(path, internal.Enum_valuesTag)
+	for i := 0; i < ed.Values().Len(); i++ {
+		src := ed.Values().Get(i)
+		srcProto := edp.GetValue()[src.Index()]
+		evd := createEnumValueDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = evd
+		// NB: for backwards compatibility, also register the enum value as if
+		// scoped within the enum (counter-intuitively, enum value full names are
+		// scoped in the enum's parent element). EnumValueDescriptor.GetFullyQualifiedName
+		// returns that alternate full name.
+		symbols[evd.GetFullyQualifiedName()] = evd
 		ret.values = append(ret.values, evd)
 	}
 	if len(ret.values) > 0 {
@@ -1101,7 +1215,7 @@
 		copy(ret.valuesByNum, ret.values)
 		sort.Stable(ret.valuesByNum)
 	}
-	return ret, enumName
+	return ret
 }
 
 type sortedValues []*EnumValueDescriptor
@@ -1116,26 +1230,19 @@
 
 func (sv sortedValues) Swap(i, j int) {
 	sv[i], sv[j] = sv[j], sv[i]
-}
 
-func (ed *EnumDescriptor) resolve(path []int32) {
-	ed.sourceInfoPath = append([]int32(nil), path...) // defensive copy
-	path = append(path, internal.Enum_valuesTag)
-	for i, evd := range ed.values {
-		evd.resolve(append(path, int32(i)))
-	}
 }
 
 // GetName returns the simple (unqualified) name of the enum type.
 func (ed *EnumDescriptor) GetName() string {
-	return ed.proto.GetName()
+	return string(ed.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the enum type.
 // This includes the package name (if there is one) as well as the names of any
 // enclosing messages.
 func (ed *EnumDescriptor) GetFullyQualifiedName() string {
-	return ed.fqn
+	return string(ed.wrapped.FullName())
 }
 
 // GetParent returns the enum type's enclosing descriptor. For top-level enums,
@@ -1158,7 +1265,7 @@
 }
 
 // GetEnumOptions returns the enum type's options.
-func (ed *EnumDescriptor) GetEnumOptions() *dpb.EnumOptions {
+func (ed *EnumDescriptor) GetEnumOptions() *descriptorpb.EnumOptions {
 	return ed.proto.GetOptions()
 }
 
@@ -1167,7 +1274,7 @@
 // returned info contains information about the location in the file where the
 // enum type was defined and also contains comments associated with the enum
 // definition.
-func (ed *EnumDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (ed *EnumDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return ed.file.sourceInfo.Get(ed.sourceInfoPath)
 }
 
@@ -1179,7 +1286,7 @@
 }
 
 // AsEnumDescriptorProto returns the underlying descriptor proto.
-func (ed *EnumDescriptor) AsEnumDescriptorProto() *dpb.EnumDescriptorProto {
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *descriptorpb.EnumDescriptorProto {
 	return ed.proto
 }
 
@@ -1196,7 +1303,7 @@
 // FindValueByName finds the enum value with the given name. If no such value exists
 // then nil is returned.
 func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
-	fqn := fmt.Sprintf("%s.%s", ed.fqn, name)
+	fqn := fmt.Sprintf("%s.%s", ed.GetFullyQualifiedName(), name)
 	if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
 		return vd
 	} else {
@@ -1220,16 +1327,33 @@
 
 // EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
 type EnumValueDescriptor struct {
-	proto          *dpb.EnumValueDescriptorProto
+	wrapped        protoreflect.EnumValueDescriptor
+	proto          *descriptorpb.EnumValueDescriptorProto
 	parent         *EnumDescriptor
 	file           *FileDescriptor
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, enclosing string, evd *dpb.EnumValueDescriptorProto) (*EnumValueDescriptor, string) {
-	valName := merge(enclosing, evd.GetName())
-	return &EnumValueDescriptor{proto: evd, parent: parent, file: fd, fqn: valName}, valName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnumValue, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (vd *EnumValueDescriptor) Unwrap() protoreflect.Descriptor {
+	return vd.wrapped
+}
+
+// UnwrapEnumValue returns the underlying protoreflect.EnumValueDescriptor.
+func (vd *EnumValueDescriptor) UnwrapEnumValue() protoreflect.EnumValueDescriptor {
+	return vd.wrapped
+}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, evd protoreflect.EnumValueDescriptor, evdp *descriptorpb.EnumValueDescriptorProto, path []int32) *EnumValueDescriptor {
+	return &EnumValueDescriptor{
+		wrapped:        evd,
+		proto:          evdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
 }
 
 func (vd *EnumValueDescriptor) resolve(path []int32) {
@@ -1238,18 +1362,24 @@
 
 // GetName returns the name of the enum value.
 func (vd *EnumValueDescriptor) GetName() string {
-	return vd.proto.GetName()
+	return string(vd.wrapped.Name())
 }
 
 // GetNumber returns the numeric value associated with this enum value.
 func (vd *EnumValueDescriptor) GetNumber() int32 {
-	return vd.proto.GetNumber()
+	return int32(vd.wrapped.Number())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the enum value.
 // Unlike GetName, this includes fully qualified name of the enclosing enum.
 func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
-	return vd.fqn
+	// NB: Technically, we do not return the correct value. Enum values are
+	// scoped within the enclosing element, not within the enum itself (which
+	// is very non-intuitive, but it follows C++ scoping rules). The value
+	// returned from vd.wrapped.FullName() is correct. However, we return
+	// something different, just for backwards compatibility, as this package
+	// has always instead returned the name scoped inside the enum.
+	return fmt.Sprintf("%s.%s", vd.parent.GetFullyQualifiedName(), vd.wrapped.Name())
 }
 
 // GetParent returns the descriptor for the enum in which this enum value is
@@ -1278,7 +1408,7 @@
 }
 
 // GetEnumValueOptions returns the enum value's options.
-func (vd *EnumValueDescriptor) GetEnumValueOptions() *dpb.EnumValueOptions {
+func (vd *EnumValueDescriptor) GetEnumValueOptions() *descriptorpb.EnumValueOptions {
 	return vd.proto.GetOptions()
 }
 
@@ -1287,7 +1417,7 @@
 // returned info contains information about the location in the file where the
 // enum value was defined and also contains comments associated with the enum
 // value definition.
-func (vd *EnumValueDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (vd *EnumValueDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return vd.file.sourceInfo.Get(vd.sourceInfoPath)
 }
 
@@ -1299,7 +1429,7 @@
 }
 
 // AsEnumValueDescriptorProto returns the underlying descriptor proto.
-func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *dpb.EnumValueDescriptorProto {
+func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto {
 	return vd.proto
 }
 
@@ -1310,29 +1440,46 @@
 
 // ServiceDescriptor describes an RPC service declared in a proto file.
 type ServiceDescriptor struct {
-	proto          *dpb.ServiceDescriptorProto
+	wrapped        protoreflect.ServiceDescriptor
+	proto          *descriptorpb.ServiceDescriptorProto
 	file           *FileDescriptor
 	methods        []*MethodDescriptor
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createServiceDescriptor(fd *FileDescriptor, enclosing string, sd *dpb.ServiceDescriptorProto, symbols map[string]Descriptor) (*ServiceDescriptor, string) {
-	serviceName := merge(enclosing, sd.GetName())
-	ret := &ServiceDescriptor{proto: sd, file: fd, fqn: serviceName}
-	for _, m := range sd.GetMethod() {
-		md, n := createMethodDescriptor(fd, ret, serviceName, m)
-		symbols[n] = md
-		ret.methods = append(ret.methods, md)
-	}
-	return ret, serviceName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapService, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (sd *ServiceDescriptor) Unwrap() protoreflect.Descriptor {
+	return sd.wrapped
 }
 
-func (sd *ServiceDescriptor) resolve(path []int32, scopes []scope) error {
-	sd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+// UnwrapService returns the underlying protoreflect.ServiceDescriptor.
+func (sd *ServiceDescriptor) UnwrapService() protoreflect.ServiceDescriptor {
+	return sd.wrapped
+}
+
+func createServiceDescriptor(fd *FileDescriptor, sd protoreflect.ServiceDescriptor, sdp *descriptorpb.ServiceDescriptorProto, symbols map[string]Descriptor, path []int32) *ServiceDescriptor {
+	ret := &ServiceDescriptor{
+		wrapped:        sd,
+		proto:          sdp,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
 	path = append(path, internal.Service_methodsTag)
-	for i, md := range sd.methods {
-		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+	for i := 0; i < sd.Methods().Len(); i++ {
+		src := sd.Methods().Get(i)
+		srcProto := sdp.GetMethod()[src.Index()]
+		md := createMethodDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = md
+		ret.methods = append(ret.methods, md)
+	}
+	return ret
+}
+
+func (sd *ServiceDescriptor) resolve(cache descriptorCache) error {
+	for _, md := range sd.methods {
+		if err := md.resolve(cache); err != nil {
 			return err
 		}
 	}
@@ -1341,13 +1488,13 @@
 
 // GetName returns the simple (unqualified) name of the service.
 func (sd *ServiceDescriptor) GetName() string {
-	return sd.proto.GetName()
+	return string(sd.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the service. This
 // includes the package name (if there is one).
 func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
-	return sd.fqn
+	return string(sd.wrapped.FullName())
 }
 
 // GetParent returns the descriptor for the file in which this service is
@@ -1370,7 +1517,7 @@
 }
 
 // GetServiceOptions returns the service's options.
-func (sd *ServiceDescriptor) GetServiceOptions() *dpb.ServiceOptions {
+func (sd *ServiceDescriptor) GetServiceOptions() *descriptorpb.ServiceOptions {
 	return sd.proto.GetOptions()
 }
 
@@ -1379,7 +1526,7 @@
 // returned info contains information about the location in the file where the
 // service was defined and also contains comments associated with the service
 // definition.
-func (sd *ServiceDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (sd *ServiceDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return sd.file.sourceInfo.Get(sd.sourceInfoPath)
 }
 
@@ -1391,7 +1538,7 @@
 }
 
 // AsServiceDescriptorProto returns the underlying descriptor proto.
-func (sd *ServiceDescriptor) AsServiceDescriptorProto() *dpb.ServiceDescriptorProto {
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto {
 	return sd.proto
 }
 
@@ -1408,7 +1555,7 @@
 // FindMethodByName finds the method with the given name. If no such method exists
 // then nil is returned.
 func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
-	fqn := fmt.Sprintf("%s.%s", sd.fqn, name)
+	fqn := fmt.Sprintf("%s.%s", sd.GetFullyQualifiedName(), name)
 	if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
 		return md
 	} else {
@@ -1418,45 +1565,69 @@
 
 // MethodDescriptor describes an RPC method declared in a proto file.
 type MethodDescriptor struct {
-	proto          *dpb.MethodDescriptorProto
+	wrapped        protoreflect.MethodDescriptor
+	proto          *descriptorpb.MethodDescriptorProto
 	parent         *ServiceDescriptor
 	file           *FileDescriptor
 	inType         *MessageDescriptor
 	outType        *MessageDescriptor
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, enclosing string, md *dpb.MethodDescriptorProto) (*MethodDescriptor, string) {
-	// request and response types get resolved later
-	methodName := merge(enclosing, md.GetName())
-	return &MethodDescriptor{proto: md, parent: parent, file: fd, fqn: methodName}, methodName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapMethod, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (md *MethodDescriptor) Unwrap() protoreflect.Descriptor {
+	return md.wrapped
 }
 
-func (md *MethodDescriptor) resolve(path []int32, scopes []scope) error {
-	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
-	if desc, err := resolve(md.file, md.proto.GetInputType(), scopes); err != nil {
-		return err
-	} else {
-		md.inType = desc.(*MessageDescriptor)
+// UnwrapMethod returns the underlying protoreflect.MethodDescriptor.
+func (md *MethodDescriptor) UnwrapMethod() protoreflect.MethodDescriptor {
+	return md.wrapped
+}
+
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, md protoreflect.MethodDescriptor, mdp *descriptorpb.MethodDescriptorProto, path []int32) *MethodDescriptor {
+	// request and response types get resolved later
+	return &MethodDescriptor{
+		wrapped:        md,
+		proto:          mdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
 	}
-	if desc, err := resolve(md.file, md.proto.GetOutputType(), scopes); err != nil {
+}
+
+func (md *MethodDescriptor) resolve(cache descriptorCache) error {
+	if desc, err := resolve(md.file, md.wrapped.Input(), cache); err != nil {
 		return err
 	} else {
-		md.outType = desc.(*MessageDescriptor)
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("method %v has request type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetInputType(), descriptorType(desc))
+		}
+		md.inType = msgType
+	}
+	if desc, err := resolve(md.file, md.wrapped.Output(), cache); err != nil {
+		return err
+	} else {
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("method %v has response type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetOutputType(), descriptorType(desc))
+		}
+		md.outType = msgType
 	}
 	return nil
 }
 
 // GetName returns the name of the method.
 func (md *MethodDescriptor) GetName() string {
-	return md.proto.GetName()
+	return string(md.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the method. Unlike
 // GetName, this includes fully qualified name of the enclosing service.
 func (md *MethodDescriptor) GetFullyQualifiedName() string {
-	return md.fqn
+	return string(md.wrapped.FullName())
 }
 
 // GetParent returns the descriptor for the service in which this method is
@@ -1485,7 +1656,7 @@
 }
 
 // GetMethodOptions returns the method's options.
-func (md *MethodDescriptor) GetMethodOptions() *dpb.MethodOptions {
+func (md *MethodDescriptor) GetMethodOptions() *descriptorpb.MethodOptions {
 	return md.proto.GetOptions()
 }
 
@@ -1494,7 +1665,7 @@
 // returned info contains information about the location in the file where the
 // method was defined and also contains comments associated with the method
 // definition.
-func (md *MethodDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (md *MethodDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return md.file.sourceInfo.Get(md.sourceInfoPath)
 }
 
@@ -1506,7 +1677,7 @@
 }
 
 // AsMethodDescriptorProto returns the underlying descriptor proto.
-func (md *MethodDescriptor) AsMethodDescriptorProto() *dpb.MethodDescriptorProto {
+func (md *MethodDescriptor) AsMethodDescriptorProto() *descriptorpb.MethodDescriptorProto {
 	return md.proto
 }
 
@@ -1517,12 +1688,12 @@
 
 // IsServerStreaming returns true if this is a server-streaming method.
 func (md *MethodDescriptor) IsServerStreaming() bool {
-	return md.proto.GetServerStreaming()
+	return md.wrapped.IsStreamingServer()
 }
 
 // IsClientStreaming returns true if this is a client-streaming method.
 func (md *MethodDescriptor) IsClientStreaming() bool {
-	return md.proto.GetClientStreaming()
+	return md.wrapped.IsStreamingClient()
 }
 
 // GetInputType returns the input type, or request type, of the RPC method.
@@ -1537,17 +1708,34 @@
 
 // OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
 type OneOfDescriptor struct {
-	proto          *dpb.OneofDescriptorProto
+	wrapped        protoreflect.OneofDescriptor
+	proto          *descriptorpb.OneofDescriptorProto
 	parent         *MessageDescriptor
 	file           *FileDescriptor
 	choices        []*FieldDescriptor
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, enclosing string, od *dpb.OneofDescriptorProto) (*OneOfDescriptor, string) {
-	oneOfName := merge(enclosing, od.GetName())
-	ret := &OneOfDescriptor{proto: od, parent: parent, file: fd, fqn: oneOfName}
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapOneOf, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (od *OneOfDescriptor) Unwrap() protoreflect.Descriptor {
+	return od.wrapped
+}
+
+// UnwrapOneOf returns the underlying protoreflect.OneofDescriptor.
+func (od *OneOfDescriptor) UnwrapOneOf() protoreflect.OneofDescriptor {
+	return od.wrapped
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, od protoreflect.OneofDescriptor, odp *descriptorpb.OneofDescriptorProto, path []int32) *OneOfDescriptor {
+	ret := &OneOfDescriptor{
+		wrapped:        od,
+		proto:          odp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
 	for _, f := range parent.fields {
 		oi := f.proto.OneofIndex
 		if oi != nil && *oi == int32(index) {
@@ -1555,22 +1743,18 @@
 			ret.choices = append(ret.choices, f)
 		}
 	}
-	return ret, oneOfName
-}
-
-func (od *OneOfDescriptor) resolve(path []int32) {
-	od.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	return ret
 }
 
 // GetName returns the name of the one-of.
 func (od *OneOfDescriptor) GetName() string {
-	return od.proto.GetName()
+	return string(od.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
 // GetName, this includes fully qualified name of the enclosing message.
 func (od *OneOfDescriptor) GetFullyQualifiedName() string {
-	return od.fqn
+	return string(od.wrapped.FullName())
 }
 
 // GetParent returns the descriptor for the message in which this one-of is
@@ -1599,7 +1783,7 @@
 }
 
 // GetOneOfOptions returns the one-of's options.
-func (od *OneOfDescriptor) GetOneOfOptions() *dpb.OneofOptions {
+func (od *OneOfDescriptor) GetOneOfOptions() *descriptorpb.OneofOptions {
 	return od.proto.GetOptions()
 }
 
@@ -1608,7 +1792,7 @@
 // returned info contains information about the location in the file where the
 // one-of was defined and also contains comments associated with the one-of
 // definition.
-func (od *OneOfDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (od *OneOfDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return od.file.sourceInfo.Get(od.sourceInfoPath)
 }
 
@@ -1620,7 +1804,7 @@
 }
 
 // AsOneofDescriptorProto returns the underlying descriptor proto.
-func (od *OneOfDescriptor) AsOneofDescriptorProto() *dpb.OneofDescriptorProto {
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *descriptorpb.OneofDescriptorProto {
 	return od.proto
 }
 
@@ -1636,88 +1820,28 @@
 }
 
 func (od *OneOfDescriptor) IsSynthetic() bool {
-	return len(od.choices) == 1 && od.choices[0].IsProto3Optional()
+	return od.wrapped.IsSynthetic()
 }
 
-// scope represents a lexical scope in a proto file in which messages and enums
-// can be declared.
-type scope func(string) Descriptor
-
-func fileScope(fd *FileDescriptor) scope {
-	// we search symbols in this file, but also symbols in other files that have
-	// the same package as this file or a "parent" package (in protobuf,
-	// packages are a hierarchy like C++ namespaces)
-	prefixes := internal.CreatePrefixList(fd.proto.GetPackage())
-	return func(name string) Descriptor {
-		for _, prefix := range prefixes {
-			n := merge(prefix, name)
-			d := findSymbol(fd, n, false)
-			if d != nil {
-				return d
-			}
-		}
-		return nil
+func resolve(fd *FileDescriptor, src protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+	d := cache.get(src)
+	if d != nil {
+		return d, nil
 	}
-}
 
-func messageScope(md *MessageDescriptor) scope {
-	return func(name string) Descriptor {
-		n := merge(md.fqn, name)
-		if d, ok := md.file.symbols[n]; ok {
-			return d
-		}
-		return nil
+	fqn := string(src.FullName())
+
+	d = fd.FindSymbol(fqn)
+	if d != nil {
+		return d, nil
 	}
-}
 
-func resolve(fd *FileDescriptor, name string, scopes []scope) (Descriptor, error) {
-	if strings.HasPrefix(name, ".") {
-		// already fully-qualified
-		d := findSymbol(fd, name[1:], false)
+	for _, dep := range fd.deps {
+		d := dep.FindSymbol(fqn)
 		if d != nil {
 			return d, nil
 		}
-	} else {
-		// unqualified, so we look in the enclosing (last) scope first and move
-		// towards outermost (first) scope, trying to resolve the symbol
-		for i := len(scopes) - 1; i >= 0; i-- {
-			d := scopes[i](name)
-			if d != nil {
-				return d, nil
-			}
-		}
-	}
-	return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), name)
-}
-
-func findSymbol(fd *FileDescriptor, name string, public bool) Descriptor {
-	d := fd.symbols[name]
-	if d != nil {
-		return d
 	}
 
-	// When public = false, we are searching only directly imported symbols. But we
-	// also need to search transitive public imports due to semantics of public imports.
-	var deps []*FileDescriptor
-	if public {
-		deps = fd.publicDeps
-	} else {
-		deps = fd.deps
-	}
-	for _, dep := range deps {
-		d = findSymbol(dep, name, true)
-		if d != nil {
-			return d
-		}
-	}
-
-	return nil
-}
-
-func merge(a, b string) string {
-	if a == "" {
-		return b
-	} else {
-		return a + "." + b
-	}
+	return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), fqn)
 }
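A hedged sketch (not part of the patch) of the Unwrap*/wrapped-descriptor bridge introduced in this file, loading a descriptor that is always registered and handing it back to the newer protoreflect API:

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
)

func main() {
	// descriptor.proto is registered whenever descriptorpb is linked, and this
	// package now imports descriptorpb itself, so the lookup should succeed.
	fd, err := desc.LoadFileDescriptor("google/protobuf/descriptor.proto")
	if err != nil {
		panic(err)
	}
	// UnwrapFile exposes the protoreflect.FileDescriptor that *desc.FileDescriptor now wraps.
	pfd := fd.UnwrapFile()
	fmt.Println(pfd.Path(), pfd.Package(), pfd.Syntax())
}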
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
index 1740dce..07bcbf3 100644
--- a/vendor/github.com/jhump/protoreflect/desc/doc.go
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -24,18 +24,47 @@
 // properties that are not immediately accessible through rich descriptor's
 // methods.
 //
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+//
+// # Loading Descriptors
+//
 // Rich descriptors can be accessed in similar ways as their "poor" cousins
 // (descriptor protos). Instead of using proto.FileDescriptor, use
 // desc.LoadFileDescriptor. Message descriptors and extension field descriptors
 // can also be easily accessed using desc.LoadMessageDescriptor and
 // desc.LoadFieldDescriptorForExtension, respectively.
 //
+// If you are using the protoc-gen-gosrcinfo plugin (also in this repo), then
+// the descriptors returned from these Load* functions will include source code
+// information, and thus include comments for elements.
+//
+// # Creating Descriptors
+//
 // It is also possible to create rich descriptors for proto messages that a given
 // Go program doesn't even know about. For example, they could be loaded from a
 // FileDescriptorSet file (which can be generated by protoc) or loaded from a
 // server. This enables interesting things like dynamic clients: where a Go
 // program can be an RPC client of a service it wasn't compiled to know about.
 //
-// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
-// repo to see just how useful rich descriptors really are.
+// You cannot create a message descriptor without also creating its enclosing
+// file, because the enclosing file is what contains other relevant information
+// like other symbols and dependencies/imports, which is how type references
+// are resolved (such as when a field in a message has a type that is another
+// message or enum).
+//
+// So the functions in this package for creating descriptors are all for
+// creating *file* descriptors. See the various Create* functions for more
+// information.
+//
+// Also see the desc/builder sub-package, for another API that makes it easier
+// to synthesize descriptors programmatically.
+//
+// Deprecated: This module was created for use with the older "v1" Protobuf API
+// in github.com/golang/protobuf. However, much of this module is no longer
+// necessary as the newer "v2" API in google.golang.org/protobuf provides similar
+// capabilities. Instead of using this github.com/jhump/protoreflect/desc package,
+// see [google.golang.org/protobuf/reflect/protoreflect].
+//
+// [google.golang.org/protobuf/reflect/protoreflect]: https://pkg.go.dev/google.golang.org/protobuf/reflect/protoreflect
 package desc
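To make the "Loading Descriptors" guidance above concrete, a small hedged sketch (not part of the patch; the Duration well-known type and its blank import are illustrative assumptions):

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
)

func main() {
	md, err := desc.LoadMessageDescriptor("google.protobuf.Duration")
	if err != nil {
		panic(err)
	}
	if md == nil {
		panic("google.protobuf.Duration is not registered")
	}
	for _, fld := range md.GetFields() {
		fmt.Println(fld.GetNumber(), fld.GetName(), fld.GetType())
	}
}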
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
index ab93032..dc6b735 100644
--- a/vendor/github.com/jhump/protoreflect/desc/imports.go
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -8,7 +8,8 @@
 	"sync"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
 )
 
 var (
@@ -37,8 +38,8 @@
 	if len(importPath) == 0 {
 		panic("import path cannot be empty")
 	}
-	desc := proto.FileDescriptor(registerPath)
-	if len(desc) == 0 {
+	_, err := protoregistry.GlobalFiles.FindFileByPath(registerPath)
+	if err != nil {
 		panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
 	}
 	globalImportPathMu.Lock()
@@ -76,30 +77,40 @@
 //
 // For example, let's say we have two proto source files: "foo/bar.proto" and
 // "fubar/baz.proto". The latter imports the former using a line like so:
-//    import "foo/bar.proto";
+//
+//	import "foo/bar.proto";
+//
 // However, when protoc is invoked, the command-line args looks like so:
-//    protoc -Ifoo/ --go_out=foo/ bar.proto
-//    protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+//
+//	protoc -Ifoo/ --go_out=foo/ bar.proto
+//	protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+//
 // Because the path given to protoc is just "bar.proto" and "baz.proto", this is
 // how they are registered in the Go protobuf runtime. So, when loading the
 // descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
 // but will find no file registered with that path:
-//    fd, err := desc.LoadFileDescriptor("baz.proto")
-//    // err will be non-nil, complaining that there is no such file
-//    // found named "foo/bar.proto"
+//
+//	fd, err := desc.LoadFileDescriptor("baz.proto")
+//	// err will be non-nil, complaining that there is no such file
+//	// found named "foo/bar.proto"
 //
 // This can be remedied by registering alternate import paths using an
 // ImportResolver. Continuing with the example above, the code below would fix
 // any link issue:
-//    var r desc.ImportResolver
-//    r.RegisterImportPath("bar.proto", "foo/bar.proto")
-//    fd, err := r.LoadFileDescriptor("baz.proto")
-//    // err will be nil; descriptor successfully loaded!
+//
+//	var r desc.ImportResolver
+//	r.RegisterImportPath("bar.proto", "foo/bar.proto")
+//	fd, err := r.LoadFileDescriptor("baz.proto")
+//	// err will be nil; descriptor successfully loaded!
 //
 // If there are files that are *always* imported using a different relative
 // path than how they are registered, consider using the global
 // RegisterImportPath function, so you don't have to use an ImportResolver for
 // every file that imports it.
+//
+// Note that the new protobuf runtime (v1.4+) verifies that import paths are
+// correct and that descriptors can be linked during package initialization. So
+// customizing import paths for descriptor resolution is no longer necessary.
 type ImportResolver struct {
 	children    map[string]*ImportResolver
 	importPaths map[string]string
@@ -134,7 +145,7 @@
 		return r.importPaths[importPath]
 	}
 	var car, cdr string
-	idx := strings.IndexRune(source, filepath.Separator)
+	idx := strings.IndexRune(source, '/')
 	if idx < 0 {
 		car, cdr = source, ""
 	} else {
@@ -205,7 +216,7 @@
 		return
 	}
 	var car, cdr string
-	idx := strings.IndexRune(source, filepath.Separator)
+	idx := strings.IndexRune(source, '/')
 	if idx < 0 {
 		car, cdr = source, ""
 	} else {
@@ -226,86 +237,86 @@
 // any alternate paths configured in this resolver are used when linking the
 // given descriptor proto.
 func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
-	return loadFileDescriptor(filePath, r)
+	return LoadFileDescriptor(filePath)
 }
 
 // LoadMessageDescriptor is the same as the package function of the same name,
 // but any alternate paths configured in this resolver are used when linking
 // files for the returned descriptor.
 func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
-	return loadMessageDescriptor(msgName, r)
+	return LoadMessageDescriptor(msgName)
 }
 
 // LoadMessageDescriptorForMessage is the same as the package function of the
 // same name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
-	return loadMessageDescriptorForMessage(msg, r)
+	return LoadMessageDescriptorForMessage(msg)
 }
 
 // LoadMessageDescriptorForType is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
-	return loadMessageDescriptorForType(msgType, r)
+	return LoadMessageDescriptorForType(msgType)
 }
 
 // LoadEnumDescriptorForEnum is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
-	return loadEnumDescriptorForEnum(enum, r)
+	return LoadEnumDescriptorForEnum(enum)
 }
 
 // LoadEnumDescriptorForType is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
-	return loadEnumDescriptorForType(enumType, r)
+	return LoadEnumDescriptorForType(enumType)
 }
 
 // LoadFieldDescriptorForExtension is the same as the package function of the
 // same name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
-	return loadFieldDescriptorForExtension(ext, r)
+	return LoadFieldDescriptorForExtension(ext)
 }
 
 // CreateFileDescriptor is the same as the package function of the same name,
 // but any alternate paths configured in this resolver are used when linking the
 // given descriptor proto.
-func (r *ImportResolver) CreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptor(fdp *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
 	return createFileDescriptor(fdp, deps, r)
 }
 
 // CreateFileDescriptors is the same as the package function of the same name,
 // but any alternate paths configured in this resolver are used when linking the
 // given descriptor protos.
-func (r *ImportResolver) CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
 	return createFileDescriptors(fds, r)
 }
 
 // CreateFileDescriptorFromSet is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking the descriptor protos in the given set.
-func (r *ImportResolver) CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
 	return createFileDescriptorFromSet(fds, r)
 }
 
 // CreateFileDescriptorsFromSet is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking the descriptor protos in the given set.
-func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
 	return createFileDescriptorsFromSet(fds, r)
 }
 
-const dotPrefix = "." + string(filepath.Separator)
+const dotPrefix = "./"
 
 func clean(path string) string {
 	if path == "" {
 		return ""
 	}
-	path = filepath.Clean(path)
+	path = filepath.ToSlash(filepath.Clean(path))
 	if path == "." {
 		return ""
 	}
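
A sketch of the alternate-path remapping described in the ImportResolver comment above (illustrative, not part of this patch); the proto file names are the hypothetical ones from that comment, and with protobuf runtime v1.4+ the remapping is normally unnecessary.

package main

import (
	"fmt"
	"log"

	"github.com/jhump/protoreflect/desc"
)

func main() {
	// "bar.proto" is how the file was registered; "baz.proto" imports it as "foo/bar.proto".
	var r desc.ImportResolver
	r.RegisterImportPath("bar.proto", "foo/bar.proto")
	fd, err := r.LoadFileDescriptor("baz.proto")
	if err != nil {
		log.Fatalf("could not load descriptor: %v", err)
	}
	fmt.Println(fd.GetName())
}
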
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
index 9aa4a3e..aa8c3e9 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
@@ -1,86 +1,37 @@
 package internal
 
 import (
-	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
-	"github.com/jhump/protoreflect/internal/codec"
-	"reflect"
 	"strings"
 
-	"github.com/jhump/protoreflect/internal"
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/types/descriptorpb"
 )
 
-// NB: We use reflection or unknown fields in case we are linked against an older
-// version of the proto runtime which does not know about the proto3_optional field.
-// We don't require linking with newer version (which would greatly simplify this)
-// because that means pulling in v1.4+ of the protobuf runtime, which has some
-// compatibility issues. (We'll be nice to users and not require they upgrade to
-// that latest runtime to upgrade to newer protoreflect.)
-
-func GetProto3Optional(fd *dpb.FieldDescriptorProto) bool {
-	type newerFieldDesc interface {
-		GetProto3Optional() bool
-	}
-	var pm proto.Message = fd
-	if fd, ok := pm.(newerFieldDesc); ok {
-		return fd.GetProto3Optional()
-	}
-
-	// Field does not exist, so we have to examine unknown fields
-	// (we just silently bail if we have problems parsing them)
-	unk := internal.GetUnrecognized(pm)
-	buf := codec.NewBuffer(unk)
-	for {
-		tag, wt, err := buf.DecodeTagAndWireType()
-		if err != nil {
-			return false
-		}
-		if tag == Field_proto3OptionalTag && wt == proto.WireVarint {
-			v, _ := buf.DecodeVarint()
-			return v != 0
-		}
-		if err := buf.SkipField(wt); err != nil {
-			return false
-		}
-	}
-}
-
-func SetProto3Optional(fd *dpb.FieldDescriptorProto) {
-	rv := reflect.ValueOf(fd).Elem()
-	fld := rv.FieldByName("Proto3Optional")
-	if fld.IsValid() {
-		fld.Set(reflect.ValueOf(proto.Bool(true)))
-		return
-	}
-
-	// Field does not exist, so we have to store as unknown field.
-	var buf codec.Buffer
-	if err := buf.EncodeTagAndWireType(Field_proto3OptionalTag, proto.WireVarint); err != nil {
-		// TODO: panic? log?
-		return
-	}
-	if err := buf.EncodeVarint(1); err != nil {
-		// TODO: panic? log?
-		return
-	}
-	internal.SetUnrecognized(fd, buf.Bytes())
-}
-
 // ProcessProto3OptionalFields adds synthetic oneofs to the given message descriptor
 // for each proto3 optional field. It also updates the fields to have the correct
-// oneof index reference.
-func ProcessProto3OptionalFields(msgd *dpb.DescriptorProto) {
+// oneof index reference. The given callback, if not nil, is called for each synthetic
+// oneof created.
+func ProcessProto3OptionalFields(msgd *descriptorpb.DescriptorProto, callback func(*descriptorpb.FieldDescriptorProto, *descriptorpb.OneofDescriptorProto)) {
 	var allNames map[string]struct{}
 	for _, fd := range msgd.Field {
-		if GetProto3Optional(fd) {
+		if fd.GetProto3Optional() {
 			// lazy init the set of all names
 			if allNames == nil {
 				allNames = map[string]struct{}{}
 				for _, fd := range msgd.Field {
 					allNames[fd.GetName()] = struct{}{}
 				}
-				for _, fd := range msgd.Extension {
-					allNames[fd.GetName()] = struct{}{}
+				for _, od := range msgd.OneofDecl {
+					allNames[od.GetName()] = struct{}{}
+				}
+				// NB: protoc only considers names of other fields and oneofs
+				// when computing the synthetic oneof name. But that feels like
+				// a bug, since it means it could generate a name that conflicts
+				// with some other symbol defined in the message. If it's decided
+				// that's NOT a bug and is desirable, then we should remove the
+				// following four loops to mimic protoc's behavior.
+				for _, xd := range msgd.Extension {
+					allNames[xd.GetName()] = struct{}{}
 				}
 				for _, ed := range msgd.EnumType {
 					allNames[ed.GetName()] = struct{}{}
@@ -114,7 +65,11 @@
 			}
 
 			fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl)))
-			msgd.OneofDecl = append(msgd.OneofDecl, &dpb.OneofDescriptorProto{Name: proto.String(ooName)})
+			ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)}
+			msgd.OneofDecl = append(msgd.OneofDecl, ood)
+			if callback != nil {
+				callback(fd, ood)
+			}
 		}
 	}
 }
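
An illustrative sketch of the new callback parameter (not part of this patch): it observes each synthetic oneof as it is appended. The descriptor proto below is a hypothetical message with a single proto3 optional field, and the snippet assumes imports of fmt, github.com/golang/protobuf/proto, google.golang.org/protobuf/types/descriptorpb, and this internal package.

msgd := &descriptorpb.DescriptorProto{
	Name: proto.String("Example"),
	Field: []*descriptorpb.FieldDescriptorProto{{
		Name:           proto.String("note"),
		Number:         proto.Int32(1),
		Type:           descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
		Label:          descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
		Proto3Optional: proto.Bool(true),
	}},
}
internal.ProcessProto3OptionalFields(msgd, func(fd *descriptorpb.FieldDescriptorProto, ood *descriptorpb.OneofDescriptorProto) {
	// the synthetic oneof for "note" is named "_note", matching protoc's convention
	fmt.Printf("field %q assigned to synthetic oneof %q\n", fd.GetName(), ood.GetName())
})
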
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/registry.go b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go
new file mode 100644
index 0000000..d7259e4
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go
@@ -0,0 +1,67 @@
+package internal
+
+import (
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/dynamicpb"
+)
+
+// RegisterExtensionsFromImportedFile registers extensions in the given file as well
+// as those in its public imports. So if another file imports the given fd, this adds
+// all extensions made visible to that importing file.
+//
+// All extensions in the given file are made visible to the importing file, and so are
+// extensions in any public imports in the given file.
+func RegisterExtensionsFromImportedFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+	registerTypesForFile(reg, fd, true, true)
+}
+
+// RegisterExtensionsVisibleToFile registers all extensions visible to the given file.
+// This includes all extensions defined in fd and as well as extensions defined in the
+// files that it imports (and any public imports thereof, etc).
+//
+// This is effectively the same as registering the extensions in fd and then calling
+// RegisterExtensionsFromImportedFile for each file imported by fd.
+func RegisterExtensionsVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+	registerTypesForFile(reg, fd, true, false)
+}
+
+// RegisterTypesVisibleToFile registers all types visible to the given file.
+// This is the same as RegisterExtensionsVisibleToFile but it also registers
+// message and enum types, not just extensions.
+func RegisterTypesVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+	registerTypesForFile(reg, fd, false, false)
+}
+
+func registerTypesForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor, extensionsOnly, publicImportsOnly bool) {
+	registerTypes(reg, fd, extensionsOnly)
+	for i := 0; i < fd.Imports().Len(); i++ {
+		imp := fd.Imports().Get(i)
+		if imp.IsPublic || !publicImportsOnly {
+			registerTypesForFile(reg, imp, extensionsOnly, true)
+		}
+	}
+}
+
+func registerTypes(reg *protoregistry.Types, elem fileOrMessage, extensionsOnly bool) {
+	for i := 0; i < elem.Extensions().Len(); i++ {
+		_ = reg.RegisterExtension(dynamicpb.NewExtensionType(elem.Extensions().Get(i)))
+	}
+	if !extensionsOnly {
+		for i := 0; i < elem.Messages().Len(); i++ {
+			_ = reg.RegisterMessage(dynamicpb.NewMessageType(elem.Messages().Get(i)))
+		}
+		for i := 0; i < elem.Enums().Len(); i++ {
+			_ = reg.RegisterEnum(dynamicpb.NewEnumType(elem.Enums().Get(i)))
+		}
+	}
+	for i := 0; i < elem.Messages().Len(); i++ {
+		registerTypes(reg, elem.Messages().Get(i), extensionsOnly)
+	}
+}
+
+type fileOrMessage interface {
+	Extensions() protoreflect.ExtensionDescriptors
+	Messages() protoreflect.MessageDescriptors
+	Enums() protoreflect.EnumDescriptors
+}
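
A sketch of the underlying idea these helpers implement (not part of this patch), using only public APIs: walk a file descriptor and register dynamic extension types into a protoregistry.Types. fd is an assumed protoreflect.FileDescriptor obtained elsewhere.

reg := &protoregistry.Types{}
exts := fd.Extensions()
for i := 0; i < exts.Len(); i++ {
	// dynamicpb builds an ExtensionType around the plain descriptor
	_ = reg.RegisterExtension(dynamicpb.NewExtensionType(exts.Get(i)))
}
// Nested messages can declare extensions too, so a complete implementation also
// recurses into fd.Messages(), as registerTypes above does.
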
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
index b4150b8..6037128 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -1,16 +1,16 @@
 package internal
 
 import (
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 )
 
 // SourceInfoMap is a map of paths in a descriptor to the corresponding source
 // code info.
-type SourceInfoMap map[string][]*dpb.SourceCodeInfo_Location
+type SourceInfoMap map[string][]*descriptorpb.SourceCodeInfo_Location
 
 // Get returns the source code info for the given path. If there are
 // multiple locations for the same path, the first one is returned.
-func (m SourceInfoMap) Get(path []int32) *dpb.SourceCodeInfo_Location {
+func (m SourceInfoMap) Get(path []int32) *descriptorpb.SourceCodeInfo_Location {
 	v := m[asMapKey(path)]
 	if len(v) > 0 {
 		return v[0]
@@ -19,24 +19,24 @@
 }
 
 // GetAll returns all source code info for the given path.
-func (m SourceInfoMap) GetAll(path []int32) []*dpb.SourceCodeInfo_Location {
+func (m SourceInfoMap) GetAll(path []int32) []*descriptorpb.SourceCodeInfo_Location {
 	return m[asMapKey(path)]
 }
 
 // Add stores the given source code info for the given path.
-func (m SourceInfoMap) Add(path []int32, loc *dpb.SourceCodeInfo_Location) {
+func (m SourceInfoMap) Add(path []int32, loc *descriptorpb.SourceCodeInfo_Location) {
 	m[asMapKey(path)] = append(m[asMapKey(path)], loc)
 }
 
 // PutIfAbsent stores the given source code info for the given path only if the
 // given path does not exist in the map. This method returns true when the value
 // is stored, false if the path already exists.
-func (m SourceInfoMap) PutIfAbsent(path []int32, loc *dpb.SourceCodeInfo_Location) bool {
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *descriptorpb.SourceCodeInfo_Location) bool {
 	k := asMapKey(path)
 	if _, ok := m[k]; ok {
 		return false
 	}
-	m[k] = []*dpb.SourceCodeInfo_Location{loc}
+	m[k] = []*descriptorpb.SourceCodeInfo_Location{loc}
 	return true
 }
 
@@ -63,7 +63,7 @@
 
 // CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
 // source code info in the given file descriptor proto.
-func CreateSourceInfoMap(fd *dpb.FileDescriptorProto) SourceInfoMap {
+func CreateSourceInfoMap(fd *descriptorpb.FileDescriptorProto) SourceInfoMap {
 	res := SourceInfoMap{}
 	PopulateSourceInfoMap(fd, res)
 	return res
@@ -71,7 +71,7 @@
 
 // PopulateSourceInfoMap populates the given SourceInfoMap with information from
 // the given file descriptor.
-func PopulateSourceInfoMap(fd *dpb.FileDescriptorProto, m SourceInfoMap) {
+func PopulateSourceInfoMap(fd *descriptorpb.FileDescriptorProto, m SourceInfoMap) {
 	for _, l := range fd.GetSourceCodeInfo().GetLocation() {
 		m.Add(l.Path, l)
 	}
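
A small sketch (not part of this patch): the map key is the descriptor path, so the comment attached to the first message in a file lives at path [4, 0], where 4 is File_messagesTag. fdProto is an assumed *descriptorpb.FileDescriptorProto that still carries source info.

m := internal.CreateSourceInfoMap(fdProto)
if loc := m.Get([]int32{internal.File_messagesTag, 0}); loc != nil {
	fmt.Println(loc.GetLeadingComments()) // comment above the first message, if any
}
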
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
index 71855bf..595c872 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/util.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -54,6 +54,9 @@
 	// File_syntaxTag is the tag number of the syntax element in a file
 	// descriptor proto.
 	File_syntaxTag = 12
+	// File_editionTag is the tag number of the edition element in a file
+	// descriptor proto.
+	File_editionTag = 14
 	// Message_nameTag is the tag number of the name element in a message
 	// descriptor proto.
 	Message_nameTag = 1
@@ -219,17 +222,18 @@
 )
 
 // JsonName returns the default JSON name for a field with the given name.
+// This mirrors the algorithm in protoc:
+//
+//	https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95
 func JsonName(name string) string {
 	var js []rune
 	nextUpper := false
-	for i, r := range name {
+	for _, r := range name {
 		if r == '_' {
 			nextUpper = true
 			continue
 		}
-		if i == 0 {
-			js = append(js, r)
-		} else if nextUpper {
+		if nextUpper {
 			nextUpper = false
 			js = append(js, unicode.ToUpper(r))
 		} else {
@@ -251,7 +255,8 @@
 // that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
 // successively shorter prefixes of the package and then the empty string. For
 // example, for a package named "foo.bar.baz" it will return the following list:
-//   ["foo.bar.baz", "foo.bar", "foo", ""]
+//
+//	["foo.bar.baz", "foo.bar", "foo", ""]
 func CreatePrefixList(pkg string) []string {
 	if pkg == "" {
 		return []string{""}
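
A quick sketch of what the two helpers above produce for sample inputs (illustrative, not part of this patch):

fmt.Println(internal.JsonName("foo_bar_baz")) // "fooBarBaz"
fmt.Println(internal.JsonName("_note"))       // "Note": the underscore is dropped and the next rune is upper-cased
fmt.Println(internal.CreatePrefixList("foo.bar.baz"))
// [foo.bar.baz foo.bar foo ]  (successively shorter prefixes, ending with "")
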
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
index 4a05830..8fd09ac 100644
--- a/vendor/github.com/jhump/protoreflect/desc/load.go
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -1,150 +1,109 @@
 package desc
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 	"sync"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
 
+	"github.com/jhump/protoreflect/desc/sourceinfo"
 	"github.com/jhump/protoreflect/internal"
 )
 
+// The global cache is used to store descriptors that wrap items in
+// protoregistry.GlobalTypes and protoregistry.GlobalFiles. This prevents
+// repeating work to re-wrap underlying global descriptors.
 var (
-	cacheMu       sync.RWMutex
-	filesCache    = map[string]*FileDescriptor{}
-	messagesCache = map[string]*MessageDescriptor{}
-	enumCache     = map[reflect.Type]*EnumDescriptor{}
+	// We put all wrapped file and message descriptors in this cache.
+	loadedDescriptors = lockingCache{cache: mapCache{}}
+
+	// Unfortunately, we need a different mechanism for enums for
+	// compatibility with old APIs, which required that they were
+	// registered in a different way :(
+	loadedEnumsMu sync.RWMutex
+	loadedEnums   = map[reflect.Type]*EnumDescriptor{}
 )
 
 // LoadFileDescriptor creates a file descriptor using the bytes returned by
 // proto.FileDescriptor. Descriptors are cached so that they do not need to be
 // re-processed if the same file is fetched again later.
 func LoadFileDescriptor(file string) (*FileDescriptor, error) {
-	return loadFileDescriptor(file, nil)
-}
-
-func loadFileDescriptor(file string, r *ImportResolver) (*FileDescriptor, error) {
-	f := getFileFromCache(file)
-	if f != nil {
-		return f, nil
-	}
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadFileDescriptorLocked(file, r)
-}
-
-func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, error) {
-	f := filesCache[file]
-	if f != nil {
-		return f, nil
-	}
-	fd, err := internal.LoadFileDescriptor(file)
-	if err != nil {
-		return nil, err
-	}
-
-	f, err = toFileDescriptorLocked(fd, r)
-	if err != nil {
-		return nil, err
-	}
-	putCacheLocked(file, f)
-	return f, nil
-}
-
-func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) {
-	deps := make([]*FileDescriptor, len(fd.GetDependency()))
-	for i, dep := range fd.GetDependency() {
-		resolvedDep := r.ResolveImport(fd.GetName(), dep)
-		var err error
-		deps[i], err = loadFileDescriptorLocked(resolvedDep, r)
-		if _, ok := err.(internal.ErrNoSuchFile); ok && resolvedDep != dep {
-			// try original path
-			deps[i], err = loadFileDescriptorLocked(dep, r)
-		}
-		if err != nil {
-			return nil, err
+	d, err := sourceinfo.GlobalFiles.FindFileByPath(file)
+	if errors.Is(err, protoregistry.NotFound) {
+		// for backwards compatibility, see if this matches a known old
+		// alias for the file (older versions of libraries that registered
+		// the files using incorrect/non-canonical paths)
+		if alt := internal.StdFileAliases[file]; alt != "" {
+			d, err = sourceinfo.GlobalFiles.FindFileByPath(alt)
 		}
 	}
-	return CreateFileDescriptor(fd, deps...)
-}
-
-func getFileFromCache(file string) *FileDescriptor {
-	cacheMu.RLock()
-	defer cacheMu.RUnlock()
-	return filesCache[file]
-}
-
-func putCacheLocked(filename string, fd *FileDescriptor) {
-	filesCache[filename] = fd
-	putMessageCacheLocked(fd.messages)
-}
-
-func putMessageCacheLocked(mds []*MessageDescriptor) {
-	for _, md := range mds {
-		messagesCache[md.fqn] = md
-		putMessageCacheLocked(md.nested)
+	if err != nil {
+		if errors.Is(err, protoregistry.NotFound) {
+			return nil, internal.ErrNoSuchFile(file)
+		}
+		return nil, err
 	}
-}
+	if fd := loadedDescriptors.get(d); fd != nil {
+		return fd.(*FileDescriptor), nil
+	}
 
-// interface implemented by generated messages, which all have a Descriptor() method in
-// addition to the methods of proto.Message
-type protoMessage interface {
-	proto.Message
-	Descriptor() ([]byte, []int)
+	var fd *FileDescriptor
+	loadedDescriptors.withLock(func(cache descriptorCache) {
+		fd, err = wrapFile(d, cache)
+	})
+	return fd, err
 }
 
 // LoadMessageDescriptor loads descriptor using the encoded descriptor proto returned by
 // Message.Descriptor() for the given message type. If the given type is not recognized,
 // then a nil descriptor is returned.
 func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
-	return loadMessageDescriptor(message, nil)
+	mt, err := sourceinfo.GlobalTypes.FindMessageByName(protoreflect.FullName(message))
+	if err != nil {
+		if errors.Is(err, protoregistry.NotFound) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return loadMessageDescriptor(mt.Descriptor())
 }
 
-func loadMessageDescriptor(message string, r *ImportResolver) (*MessageDescriptor, error) {
-	m := getMessageFromCache(message)
-	if m != nil {
-		return m, nil
+func loadMessageDescriptor(md protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+	d := loadedDescriptors.get(md)
+	if d != nil {
+		return d.(*MessageDescriptor), nil
 	}
 
-	pt := proto.MessageType(message)
-	if pt == nil {
-		return nil, nil
-	}
-	msg, err := messageFromType(pt)
+	var err error
+	loadedDescriptors.withLock(func(cache descriptorCache) {
+		d, err = wrapMessage(md, cache)
+	})
 	if err != nil {
 		return nil, err
 	}
-
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadMessageDescriptorForTypeLocked(message, msg, r)
+	return d.(*MessageDescriptor), err
 }
 
 // LoadMessageDescriptorForType loads descriptor using the encoded descriptor proto returned
 // by message.Descriptor() for the given message type. If the given type is not recognized,
 // then a nil descriptor is returned.
 func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
-	return loadMessageDescriptorForType(messageType, nil)
-}
-
-func loadMessageDescriptorForType(messageType reflect.Type, r *ImportResolver) (*MessageDescriptor, error) {
 	m, err := messageFromType(messageType)
 	if err != nil {
 		return nil, err
 	}
-	return loadMessageDescriptorForMessage(m, r)
+	return LoadMessageDescriptorForMessage(m)
 }
 
 // LoadMessageDescriptorForMessage loads descriptor using the encoded descriptor proto
 // returned by message.Descriptor(). If the given type is not recognized, then a nil
 // descriptor is returned.
 func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
-	return loadMessageDescriptorForMessage(message, nil)
-}
-
-func loadMessageDescriptorForMessage(message proto.Message, r *ImportResolver) (*MessageDescriptor, error) {
 	// efficiently handle dynamic messages
 	type descriptorable interface {
 		GetMessageDescriptor() *MessageDescriptor
@@ -153,57 +112,26 @@
 		return d.GetMessageDescriptor(), nil
 	}
 
-	name := proto.MessageName(message)
-	if name == "" {
-		return nil, nil
+	var md protoreflect.MessageDescriptor
+	if m, ok := message.(protoreflect.ProtoMessage); ok {
+		md = m.ProtoReflect().Descriptor()
+	} else {
+		md = proto.MessageReflect(message).Descriptor()
 	}
-	m := getMessageFromCache(name)
-	if m != nil {
-		return m, nil
-	}
-
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadMessageDescriptorForTypeLocked(name, message.(protoMessage), nil)
+	return loadMessageDescriptor(sourceinfo.WrapMessage(md))
 }
 
-func messageFromType(mt reflect.Type) (protoMessage, error) {
+func messageFromType(mt reflect.Type) (proto.Message, error) {
 	if mt.Kind() != reflect.Ptr {
 		mt = reflect.PtrTo(mt)
 	}
-	m, ok := reflect.Zero(mt).Interface().(protoMessage)
+	m, ok := reflect.Zero(mt).Interface().(proto.Message)
 	if !ok {
 		return nil, fmt.Errorf("failed to create message from type: %v", mt)
 	}
 	return m, nil
 }
 
-func loadMessageDescriptorForTypeLocked(name string, message protoMessage, r *ImportResolver) (*MessageDescriptor, error) {
-	m := messagesCache[name]
-	if m != nil {
-		return m, nil
-	}
-
-	fdb, _ := message.Descriptor()
-	fd, err := internal.DecodeFileDescriptor(name, fdb)
-	if err != nil {
-		return nil, err
-	}
-
-	f, err := toFileDescriptorLocked(fd, r)
-	if err != nil {
-		return nil, err
-	}
-	putCacheLocked(fd.GetName(), f)
-	return f.FindSymbol(name).(*MessageDescriptor), nil
-}
-
-func getMessageFromCache(message string) *MessageDescriptor {
-	cacheMu.RLock()
-	defer cacheMu.RUnlock()
-	return messagesCache[message]
-}
-
 // interface implemented by all generated enums
 type protoEnum interface {
 	EnumDescriptor() ([]byte, []int)
@@ -220,10 +148,6 @@
 // LoadEnumDescriptorForType loads descriptor using the encoded descriptor proto returned
 // by enum.EnumDescriptor() for the given enum type.
 func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
-	return loadEnumDescriptorForType(enumType, nil)
-}
-
-func loadEnumDescriptorForType(enumType reflect.Type, r *ImportResolver) (*EnumDescriptor, error) {
 	// we cache descriptors using non-pointer type
 	if enumType.Kind() == reflect.Ptr {
 		enumType = enumType.Elem()
@@ -237,18 +161,24 @@
 		return nil, err
 	}
 
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadEnumDescriptorForTypeLocked(enumType, enum, r)
+	return loadEnumDescriptor(enumType, enum)
+}
+
+func getEnumFromCache(t reflect.Type) *EnumDescriptor {
+	loadedEnumsMu.RLock()
+	defer loadedEnumsMu.RUnlock()
+	return loadedEnums[t]
+}
+
+func putEnumInCache(t reflect.Type, d *EnumDescriptor) {
+	loadedEnumsMu.Lock()
+	defer loadedEnumsMu.Unlock()
+	loadedEnums[t] = d
 }
 
 // LoadEnumDescriptorForEnum loads descriptor using the encoded descriptor proto
 // returned by enum.EnumDescriptor().
 func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
-	return loadEnumDescriptorForEnum(enum, nil)
-}
-
-func loadEnumDescriptorForEnum(enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
 	et := reflect.TypeOf(enum)
 	// we cache descriptors using non-pointer type
 	if et.Kind() == reflect.Ptr {
@@ -260,55 +190,46 @@
 		return e, nil
 	}
 
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadEnumDescriptorForTypeLocked(et, enum, r)
+	return loadEnumDescriptor(et, enum)
 }
 
 func enumFromType(et reflect.Type) (protoEnum, error) {
-	if et.Kind() != reflect.Int32 {
-		et = reflect.PtrTo(et)
-	}
 	e, ok := reflect.Zero(et).Interface().(protoEnum)
 	if !ok {
+		if et.Kind() != reflect.Ptr {
+			et = reflect.PtrTo(et)
+		}
+		e, ok = reflect.Zero(et).Interface().(protoEnum)
+	}
+	if !ok {
 		return nil, fmt.Errorf("failed to create enum from type: %v", et)
 	}
 	return e, nil
 }
 
-func loadEnumDescriptorForTypeLocked(et reflect.Type, enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
-	e := enumCache[et]
-	if e != nil {
-		return e, nil
-	}
-
+func getDescriptorForEnum(enum protoEnum) (*descriptorpb.FileDescriptorProto, []int, error) {
 	fdb, path := enum.EnumDescriptor()
-	name := fmt.Sprintf("%v", et)
+	name := fmt.Sprintf("%T", enum)
 	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	return fd, path, err
+}
+
+func loadEnumDescriptor(et reflect.Type, enum protoEnum) (*EnumDescriptor, error) {
+	fdp, path, err := getDescriptorForEnum(enum)
 	if err != nil {
 		return nil, err
 	}
-	// see if we already have cached "rich" descriptor
-	f, ok := filesCache[fd.GetName()]
-	if !ok {
-		f, err = toFileDescriptorLocked(fd, r)
-		if err != nil {
-			return nil, err
-		}
-		putCacheLocked(fd.GetName(), f)
+
+	fd, err := LoadFileDescriptor(fdp.GetName())
+	if err != nil {
+		return nil, err
 	}
 
-	ed := findEnum(f, path)
-	enumCache[et] = ed
+	ed := findEnum(fd, path)
+	putEnumInCache(et, ed)
 	return ed, nil
 }
 
-func getEnumFromCache(et reflect.Type) *EnumDescriptor {
-	cacheMu.RLock()
-	defer cacheMu.RUnlock()
-	return enumCache[et]
-}
-
 func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
 	if len(path) == 1 {
 		return fd.GetEnumTypes()[path[0]]
@@ -323,11 +244,7 @@
 // LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
 // extension description.
 func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
-	return loadFieldDescriptorForExtension(ext, nil)
-}
-
-func loadFieldDescriptorForExtension(ext *proto.ExtensionDesc, r *ImportResolver) (*FieldDescriptor, error) {
-	file, err := loadFileDescriptor(ext.Filename, r)
+	file, err := LoadFileDescriptor(ext.Filename)
 	if err != nil {
 		return nil, err
 	}
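
A usage sketch of the reworked Load* entry points (illustrative, not part of this patch); they are called the same way as before, here with the well-known Duration message.

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md, err := desc.LoadMessageDescriptorForMessage(&durationpb.Duration{})
	if err != nil {
		panic(err)
	}
	fmt.Println(md.GetFullyQualifiedName()) // google.protobuf.Duration
	fmt.Println(md.GetFile().GetName())     // google/protobuf/duration.proto
}
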
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go
new file mode 100644
index 0000000..20d2d7a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go
@@ -0,0 +1,207 @@
+package sourceinfo
+
+import (
+	"math"
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+// NB: forked from google.golang.org/protobuf/internal/filedesc
+type sourceLocations struct {
+	protoreflect.SourceLocations
+
+	orig []*descriptorpb.SourceCodeInfo_Location
+	// locs is a list of sourceLocations.
+	// The SourceLocation.Next field does not need to be populated
+	// as it will be lazily populated upon first need.
+	locs []protoreflect.SourceLocation
+
+	// fd is the parent file descriptor that these locations are relative to.
+	// If non-nil, ByDescriptor verifies that the provided descriptor
+	// is a child of this file descriptor.
+	fd protoreflect.FileDescriptor
+
+	once   sync.Once
+	byPath map[pathKey]int
+}
+
+func (p *sourceLocations) Len() int { return len(p.orig) }
+func (p *sourceLocations) Get(i int) protoreflect.SourceLocation {
+	return p.lazyInit().locs[i]
+}
+func (p *sourceLocations) byKey(k pathKey) protoreflect.SourceLocation {
+	if i, ok := p.lazyInit().byPath[k]; ok {
+		return p.locs[i]
+	}
+	return protoreflect.SourceLocation{}
+}
+func (p *sourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation {
+	return p.byKey(newPathKey(path))
+}
+func (p *sourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation {
+	if p.fd != nil && desc != nil && p.fd != desc.ParentFile() {
+		return protoreflect.SourceLocation{} // mismatching parent imports
+	}
+	var pathArr [16]int32
+	path := pathArr[:0]
+	for {
+		switch desc.(type) {
+		case protoreflect.FileDescriptor:
+			// Reverse the path since it was constructed in reverse.
+			for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+				path[i], path[j] = path[j], path[i]
+			}
+			return p.byKey(newPathKey(path))
+		case protoreflect.MessageDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_messagesTag))
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_nestedMessagesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.FieldDescriptor:
+			isExtension := desc.(protoreflect.FieldDescriptor).IsExtension()
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			if isExtension {
+				switch desc.(type) {
+				case protoreflect.FileDescriptor:
+					path = append(path, int32(internal.File_extensionsTag))
+				case protoreflect.MessageDescriptor:
+					path = append(path, int32(internal.Message_extensionsTag))
+				default:
+					return protoreflect.SourceLocation{}
+				}
+			} else {
+				switch desc.(type) {
+				case protoreflect.MessageDescriptor:
+					path = append(path, int32(internal.Message_fieldsTag))
+				default:
+					return protoreflect.SourceLocation{}
+				}
+			}
+		case protoreflect.OneofDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_oneOfsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.EnumDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_enumsTag))
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_enumsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.EnumValueDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.EnumDescriptor:
+				path = append(path, int32(internal.Enum_valuesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.ServiceDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_servicesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.MethodDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.ServiceDescriptor:
+				path = append(path, int32(internal.Service_methodsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		default:
+			return protoreflect.SourceLocation{}
+		}
+	}
+}
+func (p *sourceLocations) lazyInit() *sourceLocations {
+	p.once.Do(func() {
+		if len(p.orig) > 0 {
+			p.locs = make([]protoreflect.SourceLocation, len(p.orig))
+			// Collect all the indexes for a given path.
+			pathIdxs := make(map[pathKey][]int, len(p.locs))
+			for i := range p.orig {
+				l := asSourceLocation(p.orig[i])
+				p.locs[i] = l
+				k := newPathKey(l.Path)
+				pathIdxs[k] = append(pathIdxs[k], i)
+			}
+
+			// Update the next index for all locations.
+			p.byPath = make(map[pathKey]int, len(p.locs))
+			for k, idxs := range pathIdxs {
+				for i := 0; i < len(idxs)-1; i++ {
+					p.locs[idxs[i]].Next = idxs[i+1]
+				}
+				p.locs[idxs[len(idxs)-1]].Next = 0
+				p.byPath[k] = idxs[0] // record the first location for this path
+			}
+		}
+	})
+	return p
+}
+
+func asSourceLocation(l *descriptorpb.SourceCodeInfo_Location) protoreflect.SourceLocation {
+	endLine := l.Span[0]
+	endCol := l.Span[2]
+	if len(l.Span) > 3 {
+		endLine = l.Span[2]
+		endCol = l.Span[3]
+	}
+	return protoreflect.SourceLocation{
+		Path:                    l.Path,
+		StartLine:               int(l.Span[0]),
+		StartColumn:             int(l.Span[1]),
+		EndLine:                 int(endLine),
+		EndColumn:               int(endCol),
+		LeadingDetachedComments: l.LeadingDetachedComments,
+		LeadingComments:         l.GetLeadingComments(),
+		TrailingComments:        l.GetTrailingComments(),
+	}
+}
+
+// pathKey is a comparable representation of protoreflect.SourcePath.
+type pathKey struct {
+	arr [16]uint8 // first n-1 path segments; last element is the length
+	str string    // used if the path does not fit in arr
+}
+
+func newPathKey(p protoreflect.SourcePath) (k pathKey) {
+	if len(p) < len(k.arr) {
+		for i, ps := range p {
+			if ps < 0 || math.MaxUint8 <= ps {
+				return pathKey{str: p.String()}
+			}
+			k.arr[i] = uint8(ps)
+		}
+		k.arr[len(k.arr)-1] = uint8(len(p))
+		return k
+	}
+	return pathKey{str: p.String()}
+}
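
A sketch of how these locations surface through the standard protoreflect API once a descriptor carries source info (illustrative, not part of this patch); fd is an assumed protoreflect.FileDescriptor and md one of its messages.

loc := fd.SourceLocations().ByDescriptor(md)
// StartLine/StartColumn are zero-based, hence the +1 for human-readable output
fmt.Printf("%s declared at %d:%d\n", md.FullName(), loc.StartLine+1, loc.StartColumn+1)
fmt.Println(loc.LeadingComments)
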
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
new file mode 100644
index 0000000..8301c40
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
@@ -0,0 +1,340 @@
+// Package sourceinfo provides the ability to register and query source code info
+// for file descriptors that are compiled into the binary. This data is registered
+// by code generated from the protoc-gen-gosrcinfo plugin.
+//
+// The standard descriptors bundled into the compiled binary are stripped of source
+// code info, to reduce binary size and reduce runtime memory footprint. However,
+// the source code info can be very handy and worth the size cost when used with
+// gRPC services and the server reflection service. Without source code info, the
+// descriptors that a client downloads from the reflection service have no comments.
+// But the presence of comments, and the ability to show them to humans, can greatly
+// improve the utility of user agents that use the reflection service.
+//
+// When the protoc-gen-gosrcinfo plugin is used, the desc.Load* methods, which load
+// descriptors for compiled-in elements, will automatically include source code
+// info, using the data registered with this package.
+//
+// In order to make the reflection service use this functionality, you will need to
+// be using v1.45 or higher of the Go runtime for gRPC (google.golang.org/grpc). The
+// following snippet demonstrates how to do this in your server. Do this instead of
+// using the reflection.Register function:
+//
+//	refSvr := reflection.NewServer(reflection.ServerOptions{
+//	    Services:           grpcServer,
+//	    DescriptorResolver: sourceinfo.GlobalFiles,
+//	    ExtensionResolver:  sourceinfo.GlobalFiles,
+//	})
+//	grpc_reflection_v1alpha.RegisterServerReflectionServer(grpcServer, refSvr)
+package sourceinfo
+
+import (
+	"bytes"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+var (
+	// GlobalFiles is a registry of descriptors that include source code info, if the
+	// files they belong to were processed with protoc-gen-gosrcinfo.
+	//
+	// It is meant to serve as a drop-in alternative to protoregistry.GlobalFiles that
+	// can include source code info in the returned descriptors.
+	GlobalFiles Resolver = registry{}
+
+	// GlobalTypes is a registry of descriptors that include source code info, if the
+	// files they belong to were processed with protoc-gen-gosrcinfo.
+	//
+	// It is meant to serve as a drop-in alternative to protoregistry.GlobalTypes that
+	// can include source code info in the returned descriptors.
+	GlobalTypes TypeResolver = registry{}
+
+	mu                 sync.RWMutex
+	sourceInfoByFile   = map[string]*descriptorpb.SourceCodeInfo{}
+	fileDescriptors    = map[protoreflect.FileDescriptor]protoreflect.FileDescriptor{}
+	updatedDescriptors filesWithFallback
+)
+
+// Resolver can resolve file names into file descriptors and also provides methods for
+// resolving extensions.
+type Resolver interface {
+	protodesc.Resolver
+	protoregistry.ExtensionTypeResolver
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// NB: These interfaces are far from ideal. Ideally, Resolver would have
+//    * EITHER been named FileResolver and not included the extension methods.
+//    * OR also included message methods (i.e. embed protoregistry.MessageTypeResolver).
+//   Now (since it's been released) we can't add the message methods to the interface as
+//   that's not a backwards-compatible change. So we have to introduce the new interface
+//   below, which is now a little confusing since it has some overlap with Resolver.
+
+// TypeResolver can resolve message names and URLs into message descriptors and also
+// provides methods for resolving extensions.
+type TypeResolver interface {
+	protoregistry.MessageTypeResolver
+	protoregistry.ExtensionTypeResolver
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// RegisterSourceInfo registers the given source code info for the file descriptor
+// with the given path/name.
+//
+// This is automatically used from older generated code if using a previous release of
+// the protoc-gen-gosrcinfo plugin.
+func RegisterSourceInfo(file string, srcInfo *descriptorpb.SourceCodeInfo) {
+	mu.Lock()
+	defer mu.Unlock()
+	sourceInfoByFile[file] = srcInfo
+}
+
+// RegisterEncodedSourceInfo registers the given source code info, which is a serialized
+// and gzipped form of a google.protobuf.SourceCodeInfo message.
+//
+// This is automatically used from generated code if using the protoc-gen-gosrcinfo
+// plugin.
+func RegisterEncodedSourceInfo(file string, data []byte) error {
+	zipReader, err := gzip.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = zipReader.Close()
+	}()
+	unzipped, err := io.ReadAll(zipReader)
+	if err != nil {
+		return err
+	}
+	var srcInfo descriptorpb.SourceCodeInfo
+	if err := proto.Unmarshal(unzipped, &srcInfo); err != nil {
+		return err
+	}
+	RegisterSourceInfo(file, &srcInfo)
+	return nil
+}
+
+// SourceInfoForFile queries for any registered source code info for the file
+// descriptor with the given path/name. It returns nil if no source code info
+// was registered.
+func SourceInfoForFile(file string) *descriptorpb.SourceCodeInfo {
+	mu.RLock()
+	defer mu.RUnlock()
+	return sourceInfoByFile[file]
+}
+
+func canUpgrade(d protoreflect.Descriptor) bool {
+	if d == nil {
+		return false
+	}
+	fd := d.ParentFile()
+	if fd.SourceLocations().Len() > 0 {
+		// already has source info
+		return false
+	}
+	if genFile, err := protoregistry.GlobalFiles.FindFileByPath(fd.Path()); err != nil || genFile != fd {
+		// given descriptor is not from generated code
+		return false
+	}
+	return true
+}
+
+func getFile(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	if !canUpgrade(fd) {
+		return fd, nil
+	}
+
+	mu.RLock()
+	result := fileDescriptors[fd]
+	mu.RUnlock()
+
+	if result != nil {
+		return result, nil
+	}
+
+	mu.Lock()
+	defer mu.Unlock()
+	result, err := getFileLocked(fd)
+	if err != nil {
+		return nil, fmt.Errorf("updating file %q: %w", fd.Path(), err)
+	}
+	return result, nil
+}
+
+func getFileLocked(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	result := fileDescriptors[fd]
+	if result != nil {
+		return result, nil
+	}
+
+	// We have to build its dependencies, too, so that the descriptor's
+	// references *all* have source code info.
+	var deps []protoreflect.FileDescriptor
+	imps := fd.Imports()
+	for i, length := 0, imps.Len(); i < length; i++ {
+		origDep := imps.Get(i).FileDescriptor
+		updatedDep, err := getFileLocked(origDep)
+		if err != nil {
+			return nil, fmt.Errorf("updating import %q: %w", origDep.Path(), err)
+		}
+		if updatedDep != origDep && deps == nil {
+			// lazily init slice of deps and copy over deps before this one
+			deps = make([]protoreflect.FileDescriptor, i, length)
+			for j := 0; j < i; j++ {
+				deps[j] = imps.Get(j).FileDescriptor
+			}
+		}
+		if deps != nil {
+			deps = append(deps, updatedDep)
+		}
+	}
+
+	srcInfo := sourceInfoByFile[fd.Path()]
+	if len(srcInfo.GetLocation()) == 0 && len(deps) == 0 {
+		// nothing to do; don't bother changing
+		return fd, nil
+	}
+
+	// Add source code info and rebuild.
+	fdProto := protodesc.ToFileDescriptorProto(fd)
+	fdProto.SourceCodeInfo = srcInfo
+
+	result, err := protodesc.NewFile(fdProto, &updatedDescriptors)
+	if err != nil {
+		return nil, err
+	}
+	if err := updatedDescriptors.RegisterFile(result); err != nil {
+		return nil, fmt.Errorf("registering import %q: %w", result.Path(), err)
+	}
+
+	fileDescriptors[fd] = result
+	return result, nil
+}
+
+type registry struct{}
+
+var _ protodesc.Resolver = &registry{}
+
+func (r registry) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+	fd, err := protoregistry.GlobalFiles.FindFileByPath(path)
+	if err != nil {
+		return nil, err
+	}
+	return getFile(fd)
+}
+
+func (r registry) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+	d, err := protoregistry.GlobalFiles.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	if !canUpgrade(d) {
+		return d, nil
+	}
+	switch d := d.(type) {
+	case protoreflect.FileDescriptor:
+		return getFile(d)
+	case protoreflect.MessageDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.FieldDescriptor:
+		return updateField(d)
+	case protoreflect.OneofDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.EnumDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.EnumValueDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.ServiceDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.MethodDescriptor:
+		return updateDescriptor(d)
+	default:
+		return nil, fmt.Errorf("unrecognized descriptor type: %T", d)
+	}
+}
+
+func (r registry) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+	mt, err := protoregistry.GlobalTypes.FindMessageByName(message)
+	if err != nil {
+		return nil, err
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return mt, nil
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+func (r registry) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+	if err != nil {
+		return nil, err
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return mt, nil
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+func (r registry) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	xt, err := protoregistry.GlobalTypes.FindExtensionByName(field)
+	if err != nil {
+		return nil, err
+	}
+	ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return xt, nil
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+func (r registry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	xt, err := protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+	if err != nil {
+		return nil, err
+	}
+	ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return xt, nil
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+func (r registry) RangeExtensionsByMessage(message protoreflect.FullName, fn func(protoreflect.ExtensionType) bool) {
+	protoregistry.GlobalTypes.RangeExtensionsByMessage(message, func(xt protoreflect.ExtensionType) bool {
+		ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+		if err != nil {
+			return fn(xt)
+		}
+		return fn(extensionType{ExtensionType: xt, extDesc: ext})
+	})
+}
+
+type filesWithFallback struct {
+	protoregistry.Files
+}
+
+func (f *filesWithFallback) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+	fd, err := f.Files.FindFileByPath(path)
+	if errors.Is(err, protoregistry.NotFound) {
+		fd, err = protoregistry.GlobalFiles.FindFileByPath(path)
+	}
+	return fd, err
+}
+
+func (f *filesWithFallback) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+	fd, err := f.Files.FindDescriptorByName(name)
+	if errors.Is(err, protoregistry.NotFound) {
+		fd, err = protoregistry.GlobalFiles.FindDescriptorByName(name)
+	}
+	return fd, err
+}
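
A usage sketch (illustrative, not part of this patch): once protoc-gen-gosrcinfo generated code has registered source info for a file, looking that file up through this registry yields descriptors with populated comments. The path below is hypothetical.

fd, err := sourceinfo.GlobalFiles.FindFileByPath("example/v1/service.proto")
if err != nil {
	log.Fatal(err)
}
svc := fd.Services().Get(0)
loc := fd.SourceLocations().ByDescriptor(svc)
fmt.Println(loc.LeadingComments) // non-empty only if gosrcinfo data was registered for this file
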
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go
new file mode 100644
index 0000000..53bc457
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go
@@ -0,0 +1,314 @@
+package sourceinfo
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// AddSourceInfoToFile will return a new file descriptor that is a copy
+// of fd except that it includes source code info. If the given file
+// already contains source info, was not registered from generated code,
+// or was not processed with the protoc-gen-gosrcinfo plugin, then fd
+// is returned as is, unchanged.
+func AddSourceInfoToFile(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	return getFile(fd)
+}
+
+// AddSourceInfoToMessage will return a new message descriptor that is a
+// copy of md except that it includes source code info. If the file that
+// contains the given message descriptor already contains source info,
+// was not registered from generated code, or was not processed with the
+// protoc-gen-gosrcinfo plugin, then md is returned as is, unchanged.
+func AddSourceInfoToMessage(md protoreflect.MessageDescriptor) (protoreflect.MessageDescriptor, error) {
+	return updateDescriptor(md)
+}
+
+// AddSourceInfoToEnum will return a new enum descriptor that is a copy
+// of ed except that it includes source code info. If the file that
+// contains the given enum descriptor already contains source info, was
+// not registered from generated code, or was not processed with the
+// protoc-gen-gosrcinfo plugin, then ed is returned as is, unchanged.
+func AddSourceInfoToEnum(ed protoreflect.EnumDescriptor) (protoreflect.EnumDescriptor, error) {
+	return updateDescriptor(ed)
+}
+
+// AddSourceInfoToService will return a new service descriptor that is
+// a copy of sd except that it includes source code info. If the file
+// that contains the given service descriptor already contains source
+// info, was not registered from generated code, or was not processed
+// with the protoc-gen-gosrcinfo plugin, then sd is returned as is,
+// unchanged.
+func AddSourceInfoToService(sd protoreflect.ServiceDescriptor) (protoreflect.ServiceDescriptor, error) {
+	return updateDescriptor(sd)
+}
+
+// AddSourceInfoToExtensionType will return a new extension type that
+// is a copy of xt except that its associated descriptors includes
+// source code info. If the file that contains the given extension
+// already contains source info, was not registered from generated
+// code, or was not processed with the protoc-gen-gosrcinfo plugin,
+// then xt is returned as is, unchanged.
+func AddSourceInfoToExtensionType(xt protoreflect.ExtensionType) (protoreflect.ExtensionType, error) {
+	if genType, err := protoregistry.GlobalTypes.FindExtensionByName(xt.TypeDescriptor().FullName()); err != nil || genType != xt {
+		return xt, nil // not from generated code
+	}
+	ext, err := updateField(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return nil, err
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+// AddSourceInfoToMessageType will return a new message type that
+// is a copy of mt except that its associated descriptors includes
+// source code info. If the file that contains the given message
+// already contains source info, was not registered from generated
+// code, or was not processed with the protoc-gen-gosrcinfo plugin,
+// then mt is returned as is, unchanged.
+func AddSourceInfoToMessageType(mt protoreflect.MessageType) (protoreflect.MessageType, error) {
+	if genType, err := protoregistry.GlobalTypes.FindMessageByName(mt.Descriptor().FullName()); err != nil || genType != mt {
+		return mt, nil // not from generated code
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return nil, err
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+// WrapFile is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToFile and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToFile directly instead. The word "wrap" is
+// a misnomer since this method does not actually wrap the given value.
+// Though unlikely, the operation can technically fail, so the recommended
+// function allows the return of an error instead of panic'ing.
+func WrapFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor {
+	result, err := AddSourceInfoToFile(fd)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapMessage is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToMessage and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToMessage directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapMessage(md protoreflect.MessageDescriptor) protoreflect.MessageDescriptor {
+	result, err := AddSourceInfoToMessage(md)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapEnum is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToEnum and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToEnum directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapEnum(ed protoreflect.EnumDescriptor) protoreflect.EnumDescriptor {
+	result, err := AddSourceInfoToEnum(ed)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapService is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToService and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToService directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapService(sd protoreflect.ServiceDescriptor) protoreflect.ServiceDescriptor {
+	result, err := AddSourceInfoToService(sd)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapExtensionType is present for backwards-compatibility reasons. It
+// calls AddSourceInfoToExtensionType and panics if that function
+// returns an error.
+//
+// Deprecated: Use AddSourceInfoToExtensionType directly instead. The
+// word "wrap" is a misnomer since this method does not actually wrap
+// the given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapExtensionType(xt protoreflect.ExtensionType) protoreflect.ExtensionType {
+	result, err := AddSourceInfoToExtensionType(xt)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapMessageType is present for backwards-compatibility reasons. It
+// calls AddSourceInfoToMessageType and panics if that function returns
+// an error.
+//
+// Deprecated: Use AddSourceInfoToMessageType directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail, so
+// the recommended function allows the return of an error instead of
+// panic'ing.
+func WrapMessageType(mt protoreflect.MessageType) protoreflect.MessageType {
+	result, err := AddSourceInfoToMessageType(mt)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+type extensionType struct {
+	protoreflect.ExtensionType
+	extDesc protoreflect.ExtensionDescriptor
+}
+
+func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
+	return extensionTypeDescriptor{ExtensionDescriptor: xt.extDesc, extType: xt.ExtensionType}
+}
+
+type extensionTypeDescriptor struct {
+	protoreflect.ExtensionDescriptor
+	extType protoreflect.ExtensionType
+}
+
+func (xtd extensionTypeDescriptor) Type() protoreflect.ExtensionType {
+	return extensionType{ExtensionType: xtd.extType, extDesc: xtd.ExtensionDescriptor}
+}
+
+func (xtd extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
+	return xtd.ExtensionDescriptor
+}
+
+type messageType struct {
+	protoreflect.MessageType
+	msgDesc protoreflect.MessageDescriptor
+}
+
+func (mt messageType) Descriptor() protoreflect.MessageDescriptor {
+	return mt.msgDesc
+}
+
+func updateField(fd protoreflect.FieldDescriptor) (protoreflect.FieldDescriptor, error) {
+	if xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor); ok {
+		ext, err := updateField(xtd.Descriptor())
+		if err != nil {
+			return nil, err
+		}
+		return extensionTypeDescriptor{ExtensionDescriptor: ext, extType: xtd.Type()}, nil
+	}
+	d, err := updateDescriptor(fd)
+	if err != nil {
+		return nil, err
+	}
+	return d.(protoreflect.FieldDescriptor), nil
+}
+
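+// updateDescriptor returns the element of the source-info-enriched copy of
+// d's file that corresponds to d. If the file needs no update, d is returned
+// unchanged.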
+func updateDescriptor[D protoreflect.Descriptor](d D) (D, error) {
+	updatedFile, err := getFile(d.ParentFile())
+	if err != nil {
+		var zero D
+		return zero, err
+	}
+	if updatedFile == d.ParentFile() {
+		// no change
+		return d, nil
+	}
+	updated := findDescriptor(updatedFile, d)
+	result, ok := updated.(D)
+	if !ok {
+		var zero D
+		return zero, fmt.Errorf("updated result is type %T which could not be converted to %T", updated, result)
+	}
+	return result, nil
+}
+
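+// findDescriptor locates, within fd, the descriptor that occupies the same
+// position (same parent chain and index) as d does in its own file.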
+func findDescriptor(fd protoreflect.FileDescriptor, d protoreflect.Descriptor) protoreflect.Descriptor {
+	if d == nil {
+		return nil
+	}
+	if _, isFile := d.(protoreflect.FileDescriptor); isFile {
+		return fd
+	}
+	if d.Parent() == nil {
+		return d
+	}
+	switch d := d.(type) {
+	case protoreflect.MessageDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(messageContainer)
+		return parent.Messages().Get(d.Index())
+	case protoreflect.FieldDescriptor:
+		if d.IsExtension() {
+			parent := findDescriptor(fd, d.Parent()).(extensionContainer)
+			return parent.Extensions().Get(d.Index())
+		} else {
+			parent := findDescriptor(fd, d.Parent()).(fieldContainer)
+			return parent.Fields().Get(d.Index())
+		}
+	case protoreflect.OneofDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(oneofContainer)
+		return parent.Oneofs().Get(d.Index())
+	case protoreflect.EnumDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(enumContainer)
+		return parent.Enums().Get(d.Index())
+	case protoreflect.EnumValueDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(enumValueContainer)
+		return parent.Values().Get(d.Index())
+	case protoreflect.ServiceDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(serviceContainer)
+		return parent.Services().Get(d.Index())
+	case protoreflect.MethodDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(methodContainer)
+		return parent.Methods().Get(d.Index())
+	}
+	return d
+}
+
+type messageContainer interface {
+	Messages() protoreflect.MessageDescriptors
+}
+
+type extensionContainer interface {
+	Extensions() protoreflect.ExtensionDescriptors
+}
+
+type fieldContainer interface {
+	Fields() protoreflect.FieldDescriptors
+}
+
+type oneofContainer interface {
+	Oneofs() protoreflect.OneofDescriptors
+}
+
+type enumContainer interface {
+	Enums() protoreflect.EnumDescriptors
+}
+
+type enumValueContainer interface {
+	Values() protoreflect.EnumValueDescriptors
+}
+
+type serviceContainer interface {
+	Services() protoreflect.ServiceDescriptors
+}
+
+type methodContainer interface {
+	Methods() protoreflect.MethodDescriptors
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/wrap.go b/vendor/github.com/jhump/protoreflect/desc/wrap.go
new file mode 100644
index 0000000..5491afd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/wrap.go
@@ -0,0 +1,211 @@
+package desc
+
+import (
+	"fmt"
+
+	"github.com/bufbuild/protocompile/protoutil"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DescriptorWrapper wraps a protoreflect.Descriptor. All of the Descriptor
+// implementations in this package implement this interface. This can be
+// used to recover the underlying descriptor. Each descriptor type in this
+// package also provides a strongly-typed form of this method, such as the
+// following method for *FileDescriptor:
+//
+//	UnwrapFile() protoreflect.FileDescriptor
+type DescriptorWrapper interface {
+	Unwrap() protoreflect.Descriptor
+}
+
+// WrapDescriptor wraps the given descriptor, returning a desc.Descriptor
+// value that represents the same element.
+func WrapDescriptor(d protoreflect.Descriptor) (Descriptor, error) {
+	return wrapDescriptor(d, mapCache{})
+}
+
+func wrapDescriptor(d protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+	switch d := d.(type) {
+	case protoreflect.FileDescriptor:
+		return wrapFile(d, cache)
+	case protoreflect.MessageDescriptor:
+		return wrapMessage(d, cache)
+	case protoreflect.FieldDescriptor:
+		return wrapField(d, cache)
+	case protoreflect.OneofDescriptor:
+		return wrapOneOf(d, cache)
+	case protoreflect.EnumDescriptor:
+		return wrapEnum(d, cache)
+	case protoreflect.EnumValueDescriptor:
+		return wrapEnumValue(d, cache)
+	case protoreflect.ServiceDescriptor:
+		return wrapService(d, cache)
+	case protoreflect.MethodDescriptor:
+		return wrapMethod(d, cache)
+	default:
+		return nil, fmt.Errorf("unknown descriptor type: %T", d)
+	}
+}
+
+// WrapFiles wraps the given file descriptors, returning a slice of *desc.FileDescriptor
+// values that represent the same files.
+func WrapFiles(d []protoreflect.FileDescriptor) ([]*FileDescriptor, error) {
+	cache := mapCache{}
+	results := make([]*FileDescriptor, len(d))
+	for i := range d {
+		var err error
+		results[i], err = wrapFile(d[i], cache)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return results, nil
+}
+
+// WrapFile wraps the given file descriptor, returning a *desc.FileDescriptor
+// value that represents the same file.
+func WrapFile(d protoreflect.FileDescriptor) (*FileDescriptor, error) {
+	return wrapFile(d, mapCache{})
+}
+
+func wrapFile(d protoreflect.FileDescriptor, cache descriptorCache) (*FileDescriptor, error) {
+	if res := cache.get(d); res != nil {
+		return res.(*FileDescriptor), nil
+	}
+	fdp := protoutil.ProtoFromFileDescriptor(d)
+	return convertFile(d, fdp, cache)
+}
+
+// WrapMessage wraps the given message descriptor, returning a *desc.MessageDescriptor
+// value that represents the same message.
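+//
+// A minimal usage sketch, assuming md is a protoreflect.MessageDescriptor
+// obtained elsewhere (for example from a generated message's
+// ProtoReflect().Descriptor()):
+//
+//	wrapped, err := WrapMessage(md)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(wrapped.GetFullyQualifiedName())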
+func WrapMessage(d protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+	return wrapMessage(d, mapCache{})
+}
+
+func wrapMessage(d protoreflect.MessageDescriptor, cache descriptorCache) (*MessageDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	switch p := parent.(type) {
+	case *FileDescriptor:
+		return p.messages[d.Index()], nil
+	case *MessageDescriptor:
+		return p.nested[d.Index()], nil
+	default:
+		return nil, fmt.Errorf("message has unexpected parent type: %T", parent)
+	}
+}
+
+// WrapField wraps the given field descriptor, returning a *desc.FieldDescriptor
+// value that represents the same field.
+func WrapField(d protoreflect.FieldDescriptor) (*FieldDescriptor, error) {
+	return wrapField(d, mapCache{})
+}
+
+func wrapField(d protoreflect.FieldDescriptor, cache descriptorCache) (*FieldDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	switch p := parent.(type) {
+	case *FileDescriptor:
+		return p.extensions[d.Index()], nil
+	case *MessageDescriptor:
+		if d.IsExtension() {
+			return p.extensions[d.Index()], nil
+		}
+		return p.fields[d.Index()], nil
+	default:
+		return nil, fmt.Errorf("field has unexpected parent type: %T", parent)
+	}
+}
+
+// WrapOneOf wraps the given oneof descriptor, returning a *desc.OneOfDescriptor
+// value that represents the same oneof.
+func WrapOneOf(d protoreflect.OneofDescriptor) (*OneOfDescriptor, error) {
+	return wrapOneOf(d, mapCache{})
+}
+
+func wrapOneOf(d protoreflect.OneofDescriptor, cache descriptorCache) (*OneOfDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	if p, ok := parent.(*MessageDescriptor); ok {
+		return p.oneOfs[d.Index()], nil
+	}
+	return nil, fmt.Errorf("oneof has unexpected parent type: %T", parent)
+}
+
+// WrapEnum wraps the given enum descriptor, returning a *desc.EnumDescriptor
+// value that represents the same enum.
+func WrapEnum(d protoreflect.EnumDescriptor) (*EnumDescriptor, error) {
+	return wrapEnum(d, mapCache{})
+}
+
+func wrapEnum(d protoreflect.EnumDescriptor, cache descriptorCache) (*EnumDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	switch p := parent.(type) {
+	case *FileDescriptor:
+		return p.enums[d.Index()], nil
+	case *MessageDescriptor:
+		return p.enums[d.Index()], nil
+	default:
+		return nil, fmt.Errorf("enum has unexpected parent type: %T", parent)
+	}
+}
+
+// WrapEnumValue wraps the given enum value descriptor, returning a *desc.EnumValueDescriptor
+// value that represents the same enum value.
+func WrapEnumValue(d protoreflect.EnumValueDescriptor) (*EnumValueDescriptor, error) {
+	return wrapEnumValue(d, mapCache{})
+}
+
+func wrapEnumValue(d protoreflect.EnumValueDescriptor, cache descriptorCache) (*EnumValueDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	if p, ok := parent.(*EnumDescriptor); ok {
+		return p.values[d.Index()], nil
+	}
+	return nil, fmt.Errorf("enum value has unexpected parent type: %T", parent)
+}
+
+// WrapService wraps the given service descriptor, returning a *desc.ServiceDescriptor
+// value that represents the same service.
+func WrapService(d protoreflect.ServiceDescriptor) (*ServiceDescriptor, error) {
+	return wrapService(d, mapCache{})
+}
+
+func wrapService(d protoreflect.ServiceDescriptor, cache descriptorCache) (*ServiceDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	if p, ok := parent.(*FileDescriptor); ok {
+		return p.services[d.Index()], nil
+	}
+	return nil, fmt.Errorf("service has unexpected parent type: %T", parent)
+}
+
+// WrapMethod wraps the given method descriptor, returning a *desc.MethodDescriptor
+// value that represents the same method.
+func WrapMethod(d protoreflect.MethodDescriptor) (*MethodDescriptor, error) {
+	return wrapMethod(d, mapCache{})
+}
+
+func wrapMethod(d protoreflect.MethodDescriptor, cache descriptorCache) (*MethodDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	if p, ok := parent.(*ServiceDescriptor); ok {
+		return p.methods[d.Index()], nil
+	}
+	return nil, fmt.Errorf("method has unexpected parent type: %T", parent)
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
index 91fd672..39e077a 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/binary.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -4,9 +4,11 @@
 
 import (
 	"fmt"
-	"github.com/golang/protobuf/proto"
-	"github.com/jhump/protoreflect/codec"
 	"io"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/codec"
 )
 
 // defaultDeterminism, if true, will mean that calls to Marshal will produce
@@ -71,6 +73,9 @@
 }
 
 func (m *Message) marshal(b *codec.Buffer) error {
+	if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() {
+		return fmt.Errorf("%s is a message set; marshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName())
+	}
 	if err := m.marshalKnownFields(b); err != nil {
 		return err
 	}
@@ -150,6 +155,9 @@
 }
 
 func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error {
+	if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() {
+		return fmt.Errorf("%s is a message set; unmarshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName())
+	}
 	for !buf.EOF() {
 		fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf)
 		if err != nil {
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
index c329fcd..59b77eb 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/doc.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -15,8 +15,7 @@
 // will be used to create other messages or parse extension fields during
 // de-serialization.
 //
-//
-// Field Types
+// # Field Types
 //
 // The types of values expected by setters and returned by getters are the
 // same as protoc generates for scalar fields. For repeated fields, there are
@@ -72,8 +71,7 @@
 // but if the factory is configured with a KnownTypeRegistry, or if the field's
 // type is a well-known type, it will return a zero value generated message.
 //
-//
-// Unrecognized Fields
+// # Unrecognized Fields
 //
 // Unrecognized fields are preserved by the dynamic message when unmarshaling
 // from the standard binary format. If the message's MessageFactory was
@@ -89,21 +87,20 @@
 // it can even happen for non-extension fields! Here's an example scenario where
 // a non-extension field can initially be unknown and become known:
 //
-//   1. A dynamic message is created with a descriptor, A, and then
-//      de-serialized from a stream of bytes. The stream includes an
-//      unrecognized tag T. The message will include tag T in its unrecognized
-//      field set.
-//   2. Another call site retrieves a newer descriptor, A', which includes a
-//      newly added field with tag T.
-//   3. That other call site then uses a FieldDescriptor to access the value of
-//      the new field. This will cause the dynamic message to parse the bytes
-//      for the unknown tag T and store them as a known field.
-//   4. Subsequent operations for tag T, including setting the field using only
-//      tag number or de-serializing a stream that includes tag T, will operate
-//      as if that tag were part of the original descriptor, A.
+//  1. A dynamic message is created with a descriptor, A, and then
+//     de-serialized from a stream of bytes. The stream includes an
+//     unrecognized tag T. The message will include tag T in its unrecognized
+//     field set.
+//  2. Another call site retrieves a newer descriptor, A', which includes a
+//     newly added field with tag T.
+//  3. That other call site then uses a FieldDescriptor to access the value of
+//     the new field. This will cause the dynamic message to parse the bytes
+//     for the unknown tag T and store them as a known field.
+//  4. Subsequent operations for tag T, including setting the field using only
+//     tag number or de-serializing a stream that includes tag T, will operate
+//     as if that tag were part of the original descriptor, A.
 //
-//
-// Compatibility
+// # Compatibility
 //
 // In addition to implementing the proto.Message interface, the included
 // Message type also provides an XXX_MessageName() method, so it can work with
@@ -145,8 +142,7 @@
 // about fields that the dynamic message does not, these unrecognized fields may
 // become known fields in the generated message.
 //
-//
-// Registries
+// # Registries
 //
 // This package also contains a couple of registries, for managing known types
 // and descriptors.
@@ -160,4 +156,12 @@
 //
 // The ExtensionRegistry allows for recognizing and parsing extensions fields
 // (for proto2 messages).
+//
+// Deprecated: This module was created for use with the older "v1" Protobuf API
+// in github.com/golang/protobuf. However, much of this module is no longer
+// necessary as the newer "v2" API in google.golang.org/protobuf provides similar
+// capabilities. Instead of using this github.com/jhump/protoreflect/dynamic package,
+// see [google.golang.org/protobuf/types/dynamicpb].
+//
+// [google.golang.org/protobuf/types/dynamicpb]: https://pkg.go.dev/google.golang.org/protobuf/types/dynamicpb
 package dynamic
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
index de13b92..ff136b0 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -10,7 +10,9 @@
 	"strings"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	protov2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/codec"
 	"github.com/jhump/protoreflect/desc"
@@ -411,19 +413,20 @@
 // The Go type of the returned value, for scalar fields, is the same as protoc
 // would generate for the field (in a non-dynamic message). The table below
 // lists the scalar types and the corresponding Go types.
-//  +-------------------------+-----------+
-//  |       Declared Type     |  Go Type  |
-//  +-------------------------+-----------+
-//  | int32, sint32, sfixed32 | int32     |
-//  | int64, sint64, sfixed64 | int64     |
-//  | uint32, fixed32         | uint32    |
-//  | uint64, fixed64         | uint64    |
-//  | float                   | float32   |
-//  | double                  | double32  |
-//  | bool                    | bool      |
-//  | string                  | string    |
-//  | bytes                   | []byte    |
-//  +-------------------------+-----------+
+//
+//	+-------------------------+-----------+
+//	|       Declared Type     |  Go Type  |
+//	+-------------------------+-----------+
+//	| int32, sint32, sfixed32 | int32     |
+//	| int64, sint64, sfixed64 | int64     |
+//	| uint32, fixed32         | uint32    |
+//	| uint64, fixed64         | uint64    |
+//	| float                   | float32   |
+//	| double                  | float64   |
+//	| bool                    | bool      |
+//	| string                  | string    |
+//	| bytes                   | []byte    |
+//	+-------------------------+-----------+
 //
 // Values for enum fields will always be int32 values. You can use the enum
 // descriptor associated with the field to lookup value names with those values.
@@ -1883,42 +1886,42 @@
 	}
 
 	switch t {
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED32,
-		descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		return toInt32(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED64,
-		descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64:
 		return toInt64(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
-		descriptor.FieldDescriptorProto_TYPE_UINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32:
 		return toUint32(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
-		descriptor.FieldDescriptorProto_TYPE_UINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
 		return toUint64(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		return toFloat32(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return toFloat64(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		return toBool(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return toBytes(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return toString(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
-		descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 		m, err := asMessage(val, fd.GetFullyQualifiedName())
 		// check that message is correct type
 		if err != nil {
@@ -2053,8 +2056,9 @@
 
 // ConvertTo converts this dynamic message into the given message. This is
 // shorthand for resetting then merging:
-//   target.Reset()
-//   m.MergeInto(target)
+//
+//	target.Reset()
+//	m.MergeInto(target)
 func (m *Message) ConvertTo(target proto.Message) error {
 	if err := m.checkType(target); err != nil {
 		return err
@@ -2081,8 +2085,9 @@
 
 // ConvertFrom converts the given message into this dynamic message. This is
 // shorthand for resetting then merging:
-//   m.Reset()
-//   m.MergeFrom(target)
+//
+//	m.Reset()
+//	m.MergeFrom(target)
 func (m *Message) ConvertFrom(target proto.Message) error {
 	if err := m.checkType(target); err != nil {
 		return err
@@ -2553,6 +2558,66 @@
 		}
 	}
 
+	// With API v2, it is possible that the new protoreflect interfaces
+	// were used to store an extension, which means it can't be returned
+	// by proto.ExtensionDescs and it's also not in the unrecognized data.
+	// So we have a separate loop to trawl through it...
+	var err error
+	proto.MessageReflect(pm).Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		if !fld.IsExtension() {
+			// normal field... we already got it above
+			return true
+		}
+		xt := fld.(protoreflect.ExtensionTypeDescriptor)
+		if _, ok := xt.Type().(*proto.ExtensionDesc); ok {
+			// known extension... we already got it above
+			return true
+		}
+		var fd *desc.FieldDescriptor
+		fd, err = desc.WrapField(fld)
+		if err != nil {
+			return false
+		}
+		v := convertProtoReflectValue(val)
+		if v, err = validFieldValue(fd, v); err != nil {
+			return false
+		}
+		values[fd] = v
+		return true
+	})
+	if err != nil {
+		return err
+	}
+
+	// unrecognized extension fields:
+	//   In API v2 of proto, some extensions may NEITHER be included in ExtensionDescs
+	//   above NOR included in unrecognized fields below. These are extensions that use
+	//   a custom extension type (not a generated one -- i.e. not a linked-in extension).
+	mr := proto.MessageReflect(pm)
+	var extBytes []byte
+	var retErr error
+	mr.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		if !fld.IsExtension() {
+			// normal field, already processed above
+			return true
+		}
+		if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok {
+			if _, ok := extd.Type().(*proto.ExtensionDesc); ok {
+				// normal known extension, already processed above
+				return true
+			}
+		}
+
+		// marshal the extension to bytes and then handle as unknown field below
+		mr.New()
+		mr.Set(fld, val)
+		extBytes, retErr = protov2.MarshalOptions{}.MarshalAppend(extBytes, mr.Interface())
+		return retErr == nil
+	})
+	if retErr != nil {
+		return retErr
+	}
+
 	// now actually perform the merge
 	for fd, v := range values {
 		if err := mergeField(m, fd, v); err != nil {
@@ -2560,6 +2625,12 @@
 		}
 	}
 
+	if len(extBytes) > 0 {
+		// treat unrecognized extensions like unknown fields and ignore any
+		// error returned: pulling in unknown fields is best-effort
+		_ = m.UnmarshalMerge(extBytes)
+	}
+
 	data := internal.GetUnrecognized(pm)
 	if len(data) > 0 {
 		// ignore any error returned: pulling in unknown fields is best-effort
@@ -2569,6 +2640,31 @@
 	return nil
 }
 
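+// convertProtoReflectValue converts a protoreflect.Value into the Go
+// representation used by dynamic messages: messages become proto.Message,
+// maps and lists are converted recursively into map[interface{}]interface{}
+// and []interface{}, enum numbers become int32, and scalars are returned
+// as-is.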
+func convertProtoReflectValue(v protoreflect.Value) interface{} {
+	val := v.Interface()
+	switch val := val.(type) {
+	case protoreflect.Message:
+		return val.Interface()
+	case protoreflect.Map:
+		mp := make(map[interface{}]interface{}, val.Len())
+		val.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+			mp[convertProtoReflectValue(k.Value())] = convertProtoReflectValue(v)
+			return true
+		})
+		return mp
+	case protoreflect.List:
+		sl := make([]interface{}, val.Len())
+		for i := 0; i < val.Len(); i++ {
+			sl[i] = convertProtoReflectValue(val.Get(i))
+		}
+		return sl
+	case protoreflect.EnumNumber:
+		return int32(val)
+	default:
+		return val
+	}
+}
+
 // Validate checks that all required fields are present. It returns an error if any are absent.
 func (m *Message) Validate() error {
 	missingFields := m.findMissingFields()
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
index 1eaedfa..6fca393 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -4,11 +4,11 @@
 package grpcdynamic
 
 import (
+	"context"
 	"fmt"
 	"io"
 
 	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
 
@@ -27,12 +27,7 @@
 // type used to construct Stubs. But the use of this interface allows
 // construction of stubs that use alternate concrete types as the transport for
 // RPC operations.
-type Channel interface {
-	Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error
-	NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)
-}
-
-var _ Channel = (*grpc.ClientConn)(nil)
+type Channel = grpc.ClientConnInterface
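+
+// A *grpc.ClientConn satisfies Channel, so a dialed connection can be passed
+// directly to NewStub. A minimal sketch (conn, ctx, methodDesc, and reqMsg
+// are placeholders obtained elsewhere):
+//
+//	stub := grpcdynamic.NewStub(conn)
+//	resp, err := stub.InvokeRpc(ctx, methodDesc, reqMsg)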
 
 // NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
 func NewStub(channel Channel) Stub {
@@ -79,6 +74,7 @@
 		ClientStreams: method.IsClientStreaming(),
 	}
 	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		cancel()
 		return nil, err
 	} else {
 		err = cs.SendMsg(request)
@@ -91,6 +87,11 @@
 			cancel()
 			return nil, err
 		}
+		go func() {
+			// when the new stream is finished, also clean up the parent context
+			<-cs.Context().Done()
+			cancel()
+		}()
 		return &ServerStream{cs, method.GetOutputType(), s.mf}, nil
 	}
 }
@@ -108,8 +109,14 @@
 		ClientStreams: method.IsClientStreaming(),
 	}
 	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		cancel()
 		return nil, err
 	} else {
+		go func() {
+			// when the new stream is finished, also clean up the parent context
+			<-cs.Context().Done()
+			cancel()
+		}()
 		return &ClientStream{cs, method, s.mf, cancel}, nil
 	}
 }
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
index 02c8298..9081965 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/json.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -17,14 +17,14 @@
 
 	"github.com/golang/protobuf/jsonpb"
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 	// link in the well-known-types that have a special JSON format
-	_ "github.com/golang/protobuf/ptypes/any"
-	_ "github.com/golang/protobuf/ptypes/duration"
-	_ "github.com/golang/protobuf/ptypes/empty"
-	_ "github.com/golang/protobuf/ptypes/struct"
-	_ "github.com/golang/protobuf/ptypes/timestamp"
-	_ "github.com/golang/protobuf/ptypes/wrappers"
+	_ "google.golang.org/protobuf/types/known/anypb"
+	_ "google.golang.org/protobuf/types/known/durationpb"
+	_ "google.golang.org/protobuf/types/known/emptypb"
+	_ "google.golang.org/protobuf/types/known/structpb"
+	_ "google.golang.org/protobuf/types/known/timestamppb"
+	_ "google.golang.org/protobuf/types/known/wrapperspb"
 
 	"github.com/jhump/protoreflect/desc"
 )
@@ -60,7 +60,7 @@
 // This method is convenient shorthand for invoking MarshalJSONPB with a default
 // (zero value) marshaler:
 //
-//    m.MarshalJSONPB(&jsonpb.Marshaler{})
+//	m.MarshalJSONPB(&jsonpb.Marshaler{})
 //
 // So enums are serialized using enum value name strings, and values that are
 // not present (including those with default/zero value for messages defined in
@@ -80,7 +80,7 @@
 // This method is convenient shorthand for invoking MarshalJSONPB with a default
 // (zero value) marshaler:
 //
-//    m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
+//	m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
 //
 // So enums are serialized using enum value name strings, and values that are
 // not present (including those with default/zero value for messages defined in
@@ -497,7 +497,7 @@
 // This method is shorthand for invoking UnmarshalJSONPB with a default (zero
 // value) unmarshaler:
 //
-//    m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+//	m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
 //
 // So unknown fields will result in an error, and no provided jsonpb.AnyResolver
 // will be used when parsing google.protobuf.Any messages.
@@ -669,13 +669,13 @@
 }
 
 func isWellKnownValue(fd *desc.FieldDescriptor) bool {
-	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+	return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE &&
 		fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
 }
 
 func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
 	// we look for ListValue; but we also look for Value, which can be assigned a ListValue
-	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+	return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE &&
 		(fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" ||
 			fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value")
 }
@@ -794,8 +794,8 @@
 	}
 
 	switch fd.GetType() {
-	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
-		descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 
 		if t == nil && allowNilMessage {
 			// if json is simply "null" return a nil pointer
@@ -822,7 +822,7 @@
 		}
 		return m, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		if e, err := r.nextNumber(); err != nil {
 			return nil, err
 		} else {
@@ -842,9 +842,9 @@
 			}
 		}
 
-	case descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		if i, err := r.nextInt(); err != nil {
 			return nil, err
 		} else if i > math.MaxInt32 || i < math.MinInt32 {
@@ -853,13 +853,13 @@
 			return int32(i), err
 		}
 
-	case descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SINT64,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		return r.nextInt()
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		if i, err := r.nextUint(); err != nil {
 			return nil, err
 		} else if i > math.MaxUint32 {
@@ -868,11 +868,11 @@
 			return uint32(i), err
 		}
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT64,
-		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		return r.nextUint()
 
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		if str, ok := t.(string); ok {
 			if str == "true" {
 				r.poll() // consume token
@@ -884,20 +884,20 @@
 		}
 		return r.nextBool()
 
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		if f, err := r.nextFloat(); err != nil {
 			return nil, err
 		} else {
 			return float32(f), nil
 		}
 
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return r.nextFloat()
 
-	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return r.nextBytes()
 
-	case descriptor.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return r.nextString()
 
 	default:
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
index 03162a4..69969fc 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -4,8 +4,9 @@
 package dynamic
 
 import (
-	"github.com/jhump/protoreflect/desc"
 	"reflect"
+
+	"github.com/jhump/protoreflect/desc"
 )
 
 // Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
index ef1b370..fb353cf 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -4,8 +4,9 @@
 package dynamic
 
 import (
-	"github.com/jhump/protoreflect/desc"
 	"reflect"
+
+	"github.com/jhump/protoreflect/desc"
 )
 
 // With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
index 9ab8e61..683e7b3 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -37,9 +37,10 @@
 // (those for which protoc-generated code is statically linked into the Go program) are
 // known types. If any dynamic messages are produced, they will recognize and parse all
 // "default" extension fields. This is the equivalent of:
-//   NewMessageFactoryWithRegistries(
-//       NewExtensionRegistryWithDefaults(),
-//       NewKnownTypeRegistryWithDefaults())
+//
+//	NewMessageFactoryWithRegistries(
+//	    NewExtensionRegistryWithDefaults(),
+//	    NewKnownTypeRegistryWithDefaults())
 func NewMessageFactoryWithDefaults() *MessageFactory {
 	return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
 }
@@ -174,28 +175,33 @@
 // GetKnownType will return the reflect.Type for the given message name if it is
 // known. If it is not known, nil is returned.
 func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
-	var msgType reflect.Type
 	if r == nil {
 		// a nil registry behaves the same as zero value instance: only know of well-known types
 		t := proto.MessageType(messageName)
 		if t != nil && isWellKnownType(t) {
-			msgType = t
+			return t
 		}
-	} else {
-		if r.includeDefault {
-			msgType = proto.MessageType(messageName)
-		} else if !r.excludeWkt {
-			t := proto.MessageType(messageName)
-			if t != nil && isWellKnownType(t) {
-				msgType = t
-			}
+		return nil
+	}
+
+	if r.includeDefault {
+		t := proto.MessageType(messageName)
+		if t != nil && isMessage(t) {
+			return t
 		}
-		if msgType == nil {
-			r.mu.RLock()
-			msgType = r.types[messageName]
-			r.mu.RUnlock()
+	} else if !r.excludeWkt {
+		t := proto.MessageType(messageName)
+		if t != nil && isWellKnownType(t) {
+			return t
 		}
 	}
 
-	return msgType
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.types[messageName]
+}
+
+func isMessage(t reflect.Type) bool {
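+// isMessage reports whether the given type implements proto.Message (checked
+// against the zero value of t).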
+	_, ok := reflect.Zero(t).Interface().(proto.Message)
+	return ok
 }
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
index 5784d3e..5680dc2 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/text.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -15,7 +15,7 @@
 	"unicode"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/codec"
 	"github.com/jhump/protoreflect/desc"
@@ -208,7 +208,7 @@
 }
 
 func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
-	group := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+	group := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP
 	if group {
 		var name string
 		if fd.IsExtension() {
@@ -518,7 +518,7 @@
 			if fd == nil {
 				// See if it's a group name
 				for _, field := range m.md.GetFields() {
-					if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
+					if field.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
 						fd = field
 						break
 					}
@@ -581,8 +581,8 @@
 				return err
 			}
 
-		} else if (fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP ||
-			fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+		} else if (fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP ||
+			fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) &&
 			tok.tokTyp.EndToken() != tokenError {
 
 			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
@@ -689,7 +689,7 @@
 
 	var expected string
 	switch fd.GetType() {
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		if tok.tokTyp == tokenIdent {
 			if tok.val.(string) == "true" {
 				return set(m, fd, true)
@@ -698,17 +698,17 @@
 			}
 		}
 		expected = "boolean value"
-	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		if tok.tokTyp == tokenString {
 			return set(m, fd, []byte(tok.val.(string)))
 		}
 		expected = "bytes string value"
-	case descriptor.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		if tok.tokTyp == tokenString {
 			return set(m, fd, tok.val)
 		}
 		expected = "string value"
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		switch tok.tokTyp {
 		case tokenFloat:
 			return set(m, fd, float32(tok.val.(float64)))
@@ -736,7 +736,7 @@
 			}
 		}
 		expected = "float value"
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		switch tok.tokTyp {
 		case tokenFloat:
 			return set(m, fd, tok.val)
@@ -764,9 +764,9 @@
 			}
 		}
 		expected = "float value"
-	case descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		if tok.tokTyp == tokenInt {
 			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
 				return err
@@ -775,9 +775,9 @@
 			}
 		}
 		expected = "int value"
-	case descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SINT64,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		if tok.tokTyp == tokenInt {
 			if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
 				return err
@@ -786,8 +786,8 @@
 			}
 		}
 		expected = "int value"
-	case descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		if tok.tokTyp == tokenInt {
 			if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
 				return err
@@ -796,8 +796,8 @@
 			}
 		}
 		expected = "unsigned int value"
-	case descriptor.FieldDescriptorProto_TYPE_UINT64,
-		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		if tok.tokTyp == tokenInt {
 			if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
 				return err
@@ -806,7 +806,7 @@
 			}
 		}
 		expected = "unsigned int value"
-	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		if tok.tokTyp == tokenIdent {
 			// TODO: add a flag to just ignore unrecognized enum value names?
 			vd := fd.GetEnumType().FindValueByName(tok.val.(string))
@@ -821,8 +821,8 @@
 			}
 		}
 		expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
-	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
-		descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 
 		endTok := tok.tokTyp.EndToken()
 		if endTok != tokenError {
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go
new file mode 100644
index 0000000..661b925
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go
@@ -0,0 +1,137 @@
+package grpcreflect
+
+import (
+	refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
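+// toV1Request converts a v1alpha reflection request into the equivalent v1
+// request.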
+func toV1Request(v1alpha *refv1alpha.ServerReflectionRequest) *refv1.ServerReflectionRequest {
+	var v1 refv1.ServerReflectionRequest
+	v1.Host = v1alpha.Host
+	switch mr := v1alpha.MessageRequest.(type) {
+	case *refv1alpha.ServerReflectionRequest_FileByFilename:
+		v1.MessageRequest = &refv1.ServerReflectionRequest_FileByFilename{
+			FileByFilename: mr.FileByFilename,
+		}
+	case *refv1alpha.ServerReflectionRequest_FileContainingSymbol:
+		v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingSymbol{
+			FileContainingSymbol: mr.FileContainingSymbol,
+		}
+	case *refv1alpha.ServerReflectionRequest_FileContainingExtension:
+		if mr.FileContainingExtension != nil {
+			v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingExtension{
+				FileContainingExtension: &refv1.ExtensionRequest{
+					ContainingType:  mr.FileContainingExtension.GetContainingType(),
+					ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+				},
+			}
+		}
+	case *refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType:
+		v1.MessageRequest = &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{
+			AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+		}
+	case *refv1alpha.ServerReflectionRequest_ListServices:
+		v1.MessageRequest = &refv1.ServerReflectionRequest_ListServices{
+			ListServices: mr.ListServices,
+		}
+	default:
+		// no value set
+	}
+	return &v1
+}
+
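+// toV1AlphaRequest converts a v1 reflection request into the equivalent
+// v1alpha request.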
+func toV1AlphaRequest(v1 *refv1.ServerReflectionRequest) *refv1alpha.ServerReflectionRequest {
+	var v1alpha refv1alpha.ServerReflectionRequest
+	v1alpha.Host = v1.Host
+	switch mr := v1.MessageRequest.(type) {
+	case *refv1.ServerReflectionRequest_FileByFilename:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileByFilename{
+				FileByFilename: mr.FileByFilename,
+			}
+		}
+	case *refv1.ServerReflectionRequest_FileContainingSymbol:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingSymbol{
+				FileContainingSymbol: mr.FileContainingSymbol,
+			}
+		}
+	case *refv1.ServerReflectionRequest_FileContainingExtension:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingExtension{
+				FileContainingExtension: &refv1alpha.ExtensionRequest{
+					ContainingType:  mr.FileContainingExtension.GetContainingType(),
+					ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+				},
+			}
+		}
+	case *refv1.ServerReflectionRequest_AllExtensionNumbersOfType:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{
+				AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+			}
+		}
+	case *refv1.ServerReflectionRequest_ListServices:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_ListServices{
+				ListServices: mr.ListServices,
+			}
+		}
+	default:
+		// no value set
+	}
+	return &v1alpha
+}
+
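+// toV1Response converts a v1alpha reflection response into the equivalent v1
+// response.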
+func toV1Response(v1alpha *refv1alpha.ServerReflectionResponse) *refv1.ServerReflectionResponse {
+	var v1 refv1.ServerReflectionResponse
+	v1.ValidHost = v1alpha.ValidHost
+	if v1alpha.OriginalRequest != nil {
+		v1.OriginalRequest = toV1Request(v1alpha.OriginalRequest)
+	}
+	switch mr := v1alpha.MessageResponse.(type) {
+	case *refv1alpha.ServerReflectionResponse_FileDescriptorResponse:
+		if mr != nil {
+			v1.MessageResponse = &refv1.ServerReflectionResponse_FileDescriptorResponse{
+				FileDescriptorResponse: &refv1.FileDescriptorResponse{
+					FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(),
+				},
+			}
+		}
+	case *refv1alpha.ServerReflectionResponse_AllExtensionNumbersResponse:
+		if mr != nil {
+			v1.MessageResponse = &refv1.ServerReflectionResponse_AllExtensionNumbersResponse{
+				AllExtensionNumbersResponse: &refv1.ExtensionNumberResponse{
+					BaseTypeName:    mr.AllExtensionNumbersResponse.GetBaseTypeName(),
+					ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(),
+				},
+			}
+		}
+	case *refv1alpha.ServerReflectionResponse_ListServicesResponse:
+		if mr != nil {
+			svcs := make([]*refv1.ServiceResponse, len(mr.ListServicesResponse.GetService()))
+			for i, svc := range mr.ListServicesResponse.GetService() {
+				svcs[i] = &refv1.ServiceResponse{
+					Name: svc.GetName(),
+				}
+			}
+			v1.MessageResponse = &refv1.ServerReflectionResponse_ListServicesResponse{
+				ListServicesResponse: &refv1.ListServiceResponse{
+					Service: svcs,
+				},
+			}
+		}
+	case *refv1alpha.ServerReflectionResponse_ErrorResponse:
+		if mr != nil {
+			v1.MessageResponse = &refv1.ServerReflectionResponse_ErrorResponse{
+				ErrorResponse: &refv1.ErrorResponse{
+					ErrorCode:    mr.ErrorResponse.GetErrorCode(),
+					ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
+				},
+			}
+		}
+	default:
+		// no value set
+	}
+	return &v1
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
index 3fca3eb..1a35540 100644
--- a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
@@ -2,23 +2,39 @@
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"reflect"
 	"runtime"
+	"sort"
 	"sync"
+	"sync/atomic"
+	"time"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
-	"golang.org/x/net/context"
+	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
-	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+	refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
 	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc"
 	"github.com/jhump/protoreflect/internal"
 )
 
+// If we try the v1 reflection API and get back "not implemented", we'll wait
+// this long before trying v1 again. This allows a long-lived client to
+// dynamically switch from v1alpha to v1 if the underlying server is updated
+// to support it. But it also prevents every stream request from always trying
+// v1 first: if we try it and see it fail, we shouldn't continually retry it
+// if we expect it will fail again.
+const durationBetweenV1Attempts = time.Hour
+
 // elementNotFoundError is the error returned by reflective operations where the
 // server does not recognize a given file name, symbol name, or extension.
 type elementNotFoundError struct {
@@ -51,14 +67,31 @@
 )
 
 func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error {
+	if cause != nil && cause.kind == elementKindSymbol && cause.name == symbol {
+		// no need to wrap
+		if symType != symbolTypeUnknown && cause.symType == symbolTypeUnknown {
+			// We previously didn't know symbol type but now do?
+			// Create a new error that has the right symbol type.
+			return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol}
+		}
+		return cause
+	}
 	return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
 }
 
 func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+	if cause != nil && cause.kind == elementKindExtension && cause.name == extendee && cause.tag == tag {
+		// no need to wrap
+		return cause
+	}
 	return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
 }
 
 func fileNotFound(file string, cause *elementNotFoundError) error {
+	if cause != nil && cause.kind == elementKindFile && cause.name == file {
+		// no need to wrap
+		return cause
+	}
 	return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
 }
 
@@ -69,15 +102,15 @@
 		if first {
 			first = false
 		} else {
-			fmt.Fprint(&b, "\ncaused by: ")
+			_, _ = fmt.Fprint(&b, "\ncaused by: ")
 		}
 		switch e.kind {
 		case elementKindSymbol:
-			fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+			_, _ = fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
 		case elementKindExtension:
-			fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+			_, _ = fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
 		default:
-			fmt.Fprintf(&b, "File not found: %s", e.name)
+			_, _ = fmt.Fprintf(&b, "File not found: %s", e.name)
 		}
 	}
 	return b.String()
@@ -105,79 +138,186 @@
 	extensionNumber     int32
 }
 
+type resolvers struct {
+	descriptorResolver protodesc.Resolver
+	extensionResolver  protoregistry.ExtensionTypeResolver
+}
+
+type fileEntry struct {
+	fd       *desc.FileDescriptor
+	fallback bool
+}
+
 // Client is a client connection to a server for performing reflection calls
 // and resolving remote symbols.
 type Client struct {
-	ctx  context.Context
-	stub rpb.ServerReflectionClient
+	ctx              context.Context
+	now              func() time.Time
+	stubV1           refv1.ServerReflectionClient
+	stubV1Alpha      refv1alpha.ServerReflectionClient
+	allowMissing     atomic.Bool
+	fallbackResolver atomic.Pointer[resolvers]
 
-	connMu sync.Mutex
-	cancel context.CancelFunc
-	stream rpb.ServerReflection_ServerReflectionInfoClient
+	connMu      sync.Mutex
+	cancel      context.CancelFunc
+	stream      refv1.ServerReflection_ServerReflectionInfoClient
+	useV1Alpha  bool
+	lastTriedV1 time.Time
 
 	cacheMu          sync.RWMutex
-	protosByName     map[string]*dpb.FileDescriptorProto
-	filesByName      map[string]*desc.FileDescriptor
-	filesBySymbol    map[string]*desc.FileDescriptor
-	filesByExtension map[extDesc]*desc.FileDescriptor
+	protosByName     map[string]*descriptorpb.FileDescriptorProto
+	filesByName      map[string]fileEntry
+	filesBySymbol    map[string]fileEntry
+	filesByExtension map[extDesc]fileEntry
 }
 
 // NewClient creates a new Client with the given root context and using the
 // given RPC stub for talking to the server.
-func NewClient(ctx context.Context, stub rpb.ServerReflectionClient) *Client {
+//
+// Deprecated: Use NewClientV1Alpha if you are intentionally pinning the
+// v1alpha version of the reflection service. Otherwise, use NewClientAuto
+// instead.
+func NewClient(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
+	return NewClientV1Alpha(ctx, stub)
+}
+
+// NewClientV1Alpha creates a new Client using the v1alpha version of reflection
+// with the given root context and using the given RPC stub for talking to the
+// server.
+func NewClientV1Alpha(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
+	return newClient(ctx, nil, stub)
+}
+
+// NewClientV1 creates a new Client using the v1 version of reflection with the
+// given root context and using the given RPC stub for talking to the server.
+func NewClientV1(ctx context.Context, stub refv1.ServerReflectionClient) *Client {
+	return newClient(ctx, stub, nil)
+}
+
+func newClient(ctx context.Context, stubv1 refv1.ServerReflectionClient, stubv1alpha refv1alpha.ServerReflectionClient) *Client {
 	cr := &Client{
 		ctx:              ctx,
-		stub:             stub,
-		protosByName:     map[string]*dpb.FileDescriptorProto{},
-		filesByName:      map[string]*desc.FileDescriptor{},
-		filesBySymbol:    map[string]*desc.FileDescriptor{},
-		filesByExtension: map[extDesc]*desc.FileDescriptor{},
+		now:              time.Now,
+		stubV1:           stubv1,
+		stubV1Alpha:      stubv1alpha,
+		protosByName:     map[string]*descriptorpb.FileDescriptorProto{},
+		filesByName:      map[string]fileEntry{},
+		filesBySymbol:    map[string]fileEntry{},
+		filesByExtension: map[extDesc]fileEntry{},
 	}
 	// don't leak a grpc stream
 	runtime.SetFinalizer(cr, (*Client).Reset)
 	return cr
 }
 
+// NewClientAuto creates a new Client that will use either v1 or v1alpha version
+// of reflection (based on what the server supports) with the given root context
+// and using the given client connection.
+//
+// It will first try the v1 version of the reflection service. If it gets back an
+// "Unimplemented" error, it will fall back to using the v1alpha version. It
+// will remember which version the server supports for any subsequent operations
+// that need to re-invoke the streaming RPC. But, if it's a very long-lived
+// client, it will periodically retry the v1 version (in case the server is
+// updated to support it also). The period for these retries is every hour.
+func NewClientAuto(ctx context.Context, cc grpc.ClientConnInterface) *Client {
+	stubv1 := refv1.NewServerReflectionClient(cc)
+	stubv1alpha := refv1alpha.NewServerReflectionClient(cc)
+	return newClient(ctx, stubv1, stubv1alpha)
+}
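
For context (not part of the patch): a minimal usage sketch of the NewClientAuto entry point added above. The server address and dial options are placeholders; it assumes a reachable server that exposes the gRPC reflection service.

```go
package main

import (
	"context"
	"log"

	"github.com/jhump/protoreflect/grpcreflect"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder address; replace with a reflection-enabled server.
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Tries the v1 reflection service first and falls back to v1alpha
	// if the server replies with Unimplemented.
	client := grpcreflect.NewClientAuto(context.Background(), conn)
	defer client.Reset()

	services, err := client.ListServices()
	if err != nil {
		log.Fatal(err)
	}
	for _, svc := range services {
		// Service names are symbols, so they can be resolved to files too.
		if _, err := client.FileContainingSymbol(svc); err != nil {
			log.Printf("could not resolve %s: %v", svc, err)
		}
	}
}
```
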
+
+// AllowMissingFileDescriptors configures the client to allow missing files,
+// where possible, when building descriptors. Missing files are often fatal
+// errors, but with this option they can sometimes be worked around. Building
+// a schema can only succeed with some files missing if the files in question
+// only provide custom options and/or other unused types.
+func (cr *Client) AllowMissingFileDescriptors() {
+	cr.allowMissing.Store(true)
+}
+
+// AllowFallbackResolver configures the client to allow falling back to the
+// given resolvers if the server is unable to supply descriptors for a particular
+// query. This allows working around issues where servers' reflection service
+// provides an incomplete set of descriptors, but the client has knowledge of
+// the missing descriptors from another source. It is usually most appropriate
+// to pass [protoregistry.GlobalFiles] and [protoregistry.GlobalTypes] as the
+// resolver values.
+//
+// The first value is used as a fallback for FileByFilename and FileContainingSymbol
+// queries. The second value is used as a fallback for FileContainingExtension. It
+// can also be used as a fallback for AllExtensionNumbersForType if it provides
+// a method with the following signature (which *[protoregistry.Types] provides):
+//
+//	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+func (cr *Client) AllowFallbackResolver(descriptors protodesc.Resolver, exts protoregistry.ExtensionTypeResolver) {
+	if descriptors == nil && exts == nil {
+		cr.fallbackResolver.Store(nil)
+	} else {
+		cr.fallbackResolver.Store(&resolvers{
+			descriptorResolver: descriptors,
+			extensionResolver:  exts,
+		})
+	}
+}
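
Also not part of the patch: a sketch of wiring up the two new options together, using the protoregistry globals the doc comment above suggests. The helper name is hypothetical.

```go
package reflectutil

import (
	"context"

	"github.com/jhump/protoreflect/grpcreflect"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/reflect/protoregistry"
)

// newResilientClient is a hypothetical helper showing how the new options
// can be combined with NewClientAuto.
func newResilientClient(ctx context.Context, cc grpc.ClientConnInterface) *grpcreflect.Client {
	client := grpcreflect.NewClientAuto(ctx, cc)
	// Tolerate files the server cannot supply when they are not strictly
	// needed (e.g. files that only contribute custom options).
	client.AllowMissingFileDescriptors()
	// Fall back to descriptors and extension types compiled into this binary
	// when the server's reflection responses are incomplete.
	client.AllowFallbackResolver(protoregistry.GlobalFiles, protoregistry.GlobalTypes)
	return client
}
```
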
+
 // FileByFilename asks the server for a file descriptor for the proto file with
 // the given name.
 func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
 	// hit the cache first
 	cr.cacheMu.RLock()
-	if fd, ok := cr.filesByName[filename]; ok {
+	if entry, ok := cr.filesByName[filename]; ok {
 		cr.cacheMu.RUnlock()
-		return fd, nil
+		return entry.fd, nil
 	}
+	// not there? see if we've downloaded the proto
 	fdp, ok := cr.protosByName[filename]
 	cr.cacheMu.RUnlock()
-	// not there? see if we've downloaded the proto
 	if ok {
 		return cr.descriptorFromProto(fdp)
 	}
 
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_FileByFilename{
 			FileByFilename: filename,
 		},
 	}
-	fd, err := cr.getAndCacheFileDescriptors(req, filename, "")
+	accept := func(fd *desc.FileDescriptor) bool {
+		return fd.GetName() == filename
+	}
+
+	fd, err := cr.getAndCacheFileDescriptors(req, filename, "", accept)
 	if isNotFound(err) {
-		// file not found? see if we can look up via alternate name
+		// File not found? see if we can look up via alternate name
 		if alternate, ok := internal.StdFileAliases[filename]; ok {
-			req := &rpb.ServerReflectionRequest{
-				MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+			req := &refv1.ServerReflectionRequest{
+				MessageRequest: &refv1.ServerReflectionRequest_FileByFilename{
 					FileByFilename: alternate,
 				},
 			}
-			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename)
-			if isNotFound(err) {
-				err = fileNotFound(filename, nil)
-			}
-		} else {
-			err = fileNotFound(filename, nil)
+			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename, accept)
 		}
+	}
+	if isNotFound(err) {
+		// Still no? See if we can use a fallback resolver
+		resolver := cr.fallbackResolver.Load()
+		if resolver != nil && resolver.descriptorResolver != nil {
+			fileDesc, fallbackErr := resolver.descriptorResolver.FindFileByPath(filename)
+			if fallbackErr == nil {
+				var wrapErr error
+				fd, wrapErr = desc.WrapFile(fileDesc)
+				if wrapErr == nil {
+					fd = cr.cacheFile(fd, true)
+					err = nil // clear error since we've succeeded via the fallback
+				}
+			}
+		}
+	}
+	if isNotFound(err) {
+		err = fileNotFound(filename, nil)
 	} else if e, ok := err.(*elementNotFoundError); ok {
 		err = fileNotFound(filename, e)
 	}
+
 	return fd, err
 }
 
@@ -186,18 +326,36 @@
 func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) {
 	// hit the cache first
 	cr.cacheMu.RLock()
-	fd, ok := cr.filesBySymbol[symbol]
+	entry, ok := cr.filesBySymbol[symbol]
 	cr.cacheMu.RUnlock()
 	if ok {
-		return fd, nil
+		return entry.fd, nil
 	}
 
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_FileContainingSymbol{
 			FileContainingSymbol: symbol,
 		},
 	}
-	fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+	accept := func(fd *desc.FileDescriptor) bool {
+		return fd.FindSymbol(symbol) != nil
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept)
+	if isNotFound(err) {
+		// Symbol not found? See if we can use a fallback resolver
+		resolver := cr.fallbackResolver.Load()
+		if resolver != nil && resolver.descriptorResolver != nil {
+			d, fallbackErr := resolver.descriptorResolver.FindDescriptorByName(protoreflect.FullName(symbol))
+			if fallbackErr == nil {
+				var wrapErr error
+				fd, wrapErr = desc.WrapFile(d.ParentFile())
+				if wrapErr == nil {
+					fd = cr.cacheFile(fd, true)
+					err = nil // clear error since we've succeeded via the fallback
+				}
+			}
+		}
+	}
 	if isNotFound(err) {
 		err = symbolNotFound(symbol, symbolTypeUnknown, nil)
 	} else if e, ok := err.(*elementNotFoundError); ok {
@@ -212,21 +370,39 @@
 func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) {
 	// hit the cache first
 	cr.cacheMu.RLock()
-	fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
+	entry, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
 	cr.cacheMu.RUnlock()
 	if ok {
-		return fd, nil
+		return entry.fd, nil
 	}
 
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{
-			FileContainingExtension: &rpb.ExtensionRequest{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_FileContainingExtension{
+			FileContainingExtension: &refv1.ExtensionRequest{
 				ContainingType:  extendedMessageName,
 				ExtensionNumber: extensionNumber,
 			},
 		},
 	}
-	fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+	accept := func(fd *desc.FileDescriptor) bool {
+		return fd.FindExtension(extendedMessageName, extensionNumber) != nil
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept)
+	if isNotFound(err) {
+		// Extension not found? See if we can use a fallback resolver
+		resolver := cr.fallbackResolver.Load()
+		if resolver != nil && resolver.extensionResolver != nil {
+			extType, fallbackErr := resolver.extensionResolver.FindExtensionByNumber(protoreflect.FullName(extendedMessageName), protoreflect.FieldNumber(extensionNumber))
+			if fallbackErr == nil {
+				var wrapErr error
+				fd, wrapErr = desc.WrapFile(extType.TypeDescriptor().ParentFile())
+				if wrapErr == nil {
+					fd = cr.cacheFile(fd, true)
+					err = nil // clear error since we've succeeded via the fallback
+				}
+			}
+		}
+	}
 	if isNotFound(err) {
 		err = extensionNotFound(extendedMessageName, extensionNumber, nil)
 	} else if e, ok := err.(*elementNotFoundError); ok {
@@ -235,7 +411,7 @@
 	return fd, err
 }
 
-func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string) (*desc.FileDescriptor, error) {
+func (cr *Client) getAndCacheFileDescriptors(req *refv1.ServerReflectionRequest, expectedName, alias string, accept func(*desc.FileDescriptor) bool) (*desc.FileDescriptor, error) {
 	resp, err := cr.send(req)
 	if err != nil {
 		return nil, err
@@ -253,9 +429,9 @@
 	// should be the answer). If we're looking for a file by name, we can be
 	// smarter and make sure to grab one by name instead of just grabbing the
 	// first one.
-	var firstFd *dpb.FileDescriptorProto
+	var fds []*descriptorpb.FileDescriptorProto
 	for _, fdBytes := range fdResp.FileDescriptorProto {
-		fd := &dpb.FileDescriptorProto{}
+		fd := &descriptorpb.FileDescriptorProto{}
 		if err = proto.Unmarshal(fdBytes, fd); err != nil {
 			return nil, err
 		}
@@ -266,13 +442,6 @@
 		}
 
 		cr.cacheMu.Lock()
-		// see if this file was created and cached concurrently
-		if firstFd == nil {
-			if d, ok := cr.filesByName[fd.GetName()]; ok {
-				cr.cacheMu.Unlock()
-				return d, nil
-			}
-		}
 		// store in cache of raw descriptor protos, but don't overwrite existing protos
 		if existingFd, ok := cr.protosByName[fd.GetName()]; ok {
 			fd = existingFd
@@ -280,120 +449,208 @@
 			cr.protosByName[fd.GetName()] = fd
 		}
 		cr.cacheMu.Unlock()
-		if firstFd == nil {
-			firstFd = fd
-		}
-	}
-	if firstFd == nil {
-		return nil, &ProtocolError{reflect.TypeOf(firstFd).Elem()}
+
+		fds = append(fds, fd)
 	}
 
-	return cr.descriptorFromProto(firstFd)
+	// find the right result from the files returned
+	for _, fd := range fds {
+		result, err := cr.descriptorFromProto(fd)
+		if err != nil {
+			return nil, err
+		}
+		if accept(result) {
+			return result, nil
+		}
+	}
+
+	return nil, status.Errorf(codes.NotFound, "response does not include expected file")
 }
 
-func (cr *Client) descriptorFromProto(fd *dpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
-	deps := make([]*desc.FileDescriptor, len(fd.GetDependency()))
+func (cr *Client) descriptorFromProto(fd *descriptorpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
+	allowMissing := cr.allowMissing.Load()
+	deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency()))
+	var deferredErr error
+	var missingDeps []int
 	for i, depName := range fd.GetDependency() {
 		if dep, err := cr.FileByFilename(depName); err != nil {
-			return nil, err
+			if _, ok := err.(*elementNotFoundError); !ok || !allowMissing {
+				return nil, err
+			}
+			// We'll ignore for now to see if the file is really necessary.
+			// (If it only supplies custom options, we can get by without it.)
+			if deferredErr == nil {
+				deferredErr = err
+			}
+			missingDeps = append(missingDeps, i)
 		} else {
-			deps[i] = dep
+			deps = append(deps, dep)
 		}
 	}
+	if len(missingDeps) > 0 {
+		fd = fileWithoutDeps(fd, missingDeps)
+	}
 	d, err := desc.CreateFileDescriptor(fd, deps...)
 	if err != nil {
+		if deferredErr != nil {
+			// assume the issue is the missing dep
+			return nil, deferredErr
+		}
 		return nil, err
 	}
-	d = cr.cacheFile(d)
+	d = cr.cacheFile(d, false)
 	return d, nil
 }
 
-func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor {
+func (cr *Client) cacheFile(fd *desc.FileDescriptor, fallback bool) *desc.FileDescriptor {
 	cr.cacheMu.Lock()
 	defer cr.cacheMu.Unlock()
 
-	// cache file descriptor by name, but don't overwrite existing entry
-	// (existing entry could come from concurrent caller)
-	if existingFd, ok := cr.filesByName[fd.GetName()]; ok {
-		return existingFd
+	// Cache file descriptor by name. If we can't overwrite an existing
+	// entry, return it. (Existing entry could come from concurrent caller.)
+	if existing, ok := cr.filesByName[fd.GetName()]; ok && !canOverwrite(existing, fallback) {
+		return existing.fd
 	}
-	cr.filesByName[fd.GetName()] = fd
+	entry := fileEntry{fd: fd, fallback: fallback}
+	cr.filesByName[fd.GetName()] = entry
 
 	// also cache by symbols and extensions
 	for _, m := range fd.GetMessageTypes() {
-		cr.cacheMessageLocked(fd, m)
+		cr.cacheMessageLocked(m, entry)
 	}
 	for _, e := range fd.GetEnumTypes() {
-		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+			continue
+		}
 		for _, v := range e.GetValues() {
-			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+			cr.maybeCacheFileBySymbol(v.GetFullyQualifiedName(), entry)
 		}
 	}
 	for _, e := range fd.GetExtensions() {
-		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
-		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+			continue
+		}
+		cr.maybeCacheFileByExtension(extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}, entry)
 	}
 	for _, s := range fd.GetServices() {
-		cr.filesBySymbol[s.GetFullyQualifiedName()] = fd
+		if !cr.maybeCacheFileBySymbol(s.GetFullyQualifiedName(), entry) {
+			continue
+		}
 		for _, m := range s.GetMethods() {
-			cr.filesBySymbol[m.GetFullyQualifiedName()] = fd
+			cr.maybeCacheFileBySymbol(m.GetFullyQualifiedName(), entry)
 		}
 	}
 
 	return fd
 }
 
-func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) {
-	cr.filesBySymbol[md.GetFullyQualifiedName()] = fd
+func (cr *Client) cacheMessageLocked(md *desc.MessageDescriptor, entry fileEntry) {
+	if !cr.maybeCacheFileBySymbol(md.GetFullyQualifiedName(), entry) {
+		return
+	}
 	for _, f := range md.GetFields() {
-		cr.filesBySymbol[f.GetFullyQualifiedName()] = fd
+		cr.maybeCacheFileBySymbol(f.GetFullyQualifiedName(), entry)
 	}
 	for _, o := range md.GetOneOfs() {
-		cr.filesBySymbol[o.GetFullyQualifiedName()] = fd
+		cr.maybeCacheFileBySymbol(o.GetFullyQualifiedName(), entry)
 	}
 	for _, e := range md.GetNestedEnumTypes() {
-		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+			continue
+		}
 		for _, v := range e.GetValues() {
-			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+			cr.maybeCacheFileBySymbol(v.GetFullyQualifiedName(), entry)
 		}
 	}
 	for _, e := range md.GetNestedExtensions() {
-		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
-		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+			continue
+		}
+		cr.maybeCacheFileByExtension(extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}, entry)
 	}
 	for _, m := range md.GetNestedMessageTypes() {
-		cr.cacheMessageLocked(fd, m) // recurse
+		cr.cacheMessageLocked(m, entry) // recurse
 	}
 }
 
+func canOverwrite(existing fileEntry, fallback bool) bool {
+	return !fallback && existing.fallback
+}
+
+func (cr *Client) maybeCacheFileBySymbol(symbol string, entry fileEntry) bool {
+	existing, ok := cr.filesBySymbol[symbol]
+	if ok && !canOverwrite(existing, entry.fallback) {
+		return false
+	}
+	cr.filesBySymbol[symbol] = entry
+	return true
+}
+
+func (cr *Client) maybeCacheFileByExtension(ext extDesc, entry fileEntry) {
+	existing, ok := cr.filesByExtension[ext]
+	if ok && !canOverwrite(existing, entry.fallback) {
+		return
+	}
+	cr.filesByExtension[ext] = entry
+}
+
 // AllExtensionNumbersForType asks the server for all known extension numbers
 // for the given fully-qualified message name.
 func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) {
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{
 			AllExtensionNumbersOfType: extendedMessageName,
 		},
 	}
 	resp, err := cr.send(req)
-	if err != nil {
-		if isNotFound(err) {
-			return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil)
-		}
+	var exts []int32
+	// If the server doesn't know about the message type and returns "not found",
+	// we'll treat that as "no known extensions" instead of returning an error.
+	if err != nil && !isNotFound(err) {
 		return nil, err
 	}
-
-	extResp := resp.GetAllExtensionNumbersResponse()
-	if extResp == nil {
-		return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+	if err == nil {
+		extResp := resp.GetAllExtensionNumbersResponse()
+		if extResp == nil {
+			return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+		}
+		exts = extResp.ExtensionNumber
 	}
-	return extResp.ExtensionNumber, nil
+
+	resolver := cr.fallbackResolver.Load()
+	if resolver != nil && resolver.extensionResolver != nil {
+		type extRanger interface {
+			RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+		}
+		if ranger, ok := resolver.extensionResolver.(extRanger); ok {
+			// Merge results with fallback resolver
+			extSet := map[int32]struct{}{}
+			ranger.RangeExtensionsByMessage(protoreflect.FullName(extendedMessageName), func(extType protoreflect.ExtensionType) bool {
+				extSet[int32(extType.TypeDescriptor().Number())] = struct{}{}
+				return true
+			})
+			if len(extSet) > 0 {
+				// De-dupe with the set of extension numbers we got
+				// from the server and merge the results back into exts.
+				for _, ext := range exts {
+					extSet[ext] = struct{}{}
+				}
+				exts = make([]int32, 0, len(extSet))
+				for ext := range extSet {
+					exts = append(exts, ext)
+				}
+				sort.Slice(exts, func(i, j int) bool { return exts[i] < exts[j] })
+			}
+		}
+	}
+	return exts, nil
 }
 
 // ListServices asks the server for the fully-qualified names of all exposed
 // services.
 func (cr *Client) ListServices() ([]string, error) {
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_ListServices{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_ListServices{
 			// proto doesn't indicate any purpose for this value and server impl
 			// doesn't actually use it...
 			ListServices: "*",
@@ -415,10 +672,10 @@
 	return serviceNames, nil
 }
 
-func (cr *Client) send(req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) send(req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
 	// we allow one immediate retry, in case we have a stale stream
 	// (e.g. closed by server)
-	resp, err := cr.doSend(true, req)
+	resp, err := cr.doSend(req)
 	if err != nil {
 		return nil, err
 	}
@@ -440,16 +697,33 @@
 	return ok && s.Code() == codes.NotFound
 }
 
-func (cr *Client) doSend(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) doSend(req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
 	// TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
 	// (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
 	// delivered in correct order.
 	cr.connMu.Lock()
 	defer cr.connMu.Unlock()
-	return cr.doSendLocked(retry, req)
+	return cr.doSendLocked(0, nil, req)
 }
 
-func (cr *Client) doSendLocked(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) doSendLocked(attemptCount int, prevErr error, req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
+	if attemptCount >= 3 && prevErr != nil {
+		return nil, prevErr
+	}
+	if (status.Code(prevErr) == codes.Unimplemented ||
+		status.Code(prevErr) == codes.Unavailable) &&
+		cr.useV1() {
+		// If v1 is unimplemented, fall back to v1alpha.
+		// We also fall back on unavailable because some servers have been
+		// observed to close the connection/cancel the stream, without sending
+		// back status or headers, when the service name is not known. When
+		// this happens, the RPC status code is unavailable.
+		// See https://github.com/fullstorydev/grpcurl/issues/434
+		cr.useV1Alpha = true
+		cr.lastTriedV1 = cr.now()
+	}
+	attemptCount++
+
 	if err := cr.initStreamLocked(); err != nil {
 		return nil, err
 	}
@@ -460,21 +734,15 @@
 			_, err = cr.stream.Recv()
 		}
 		cr.resetLocked()
-		if retry {
-			return cr.doSendLocked(false, req)
-		}
-		return nil, err
+		return cr.doSendLocked(attemptCount, err, req)
 	}
 
-	if resp, err := cr.stream.Recv(); err != nil {
+	resp, err := cr.stream.Recv()
+	if err != nil {
 		cr.resetLocked()
-		if retry {
-			return cr.doSendLocked(false, req)
-		}
-		return nil, err
-	} else {
-		return resp, nil
+		return cr.doSendLocked(attemptCount, err, req)
 	}
+	return resp, nil
 }
 
 func (cr *Client) initStreamLocked() error {
@@ -483,11 +751,38 @@
 	}
 	var newCtx context.Context
 	newCtx, cr.cancel = context.WithCancel(cr.ctx)
+	if cr.useV1Alpha && cr.now().Sub(cr.lastTriedV1) > durationBetweenV1Attempts {
+		// we're due for periodic retry of v1
+		cr.useV1Alpha = false
+	}
+	if cr.useV1() {
+		// try the v1 API
+		streamv1, err := cr.stubV1.ServerReflectionInfo(newCtx)
+		if err == nil {
+			cr.stream = streamv1
+			return nil
+		}
+		if status.Code(err) != codes.Unimplemented {
+			return err
+		}
+		// oh well, fall through below to try v1alpha and update state
+		// so we skip straight to v1alpha next time
+		cr.useV1Alpha = true
+		cr.lastTriedV1 = cr.now()
+	}
 	var err error
-	cr.stream, err = cr.stub.ServerReflectionInfo(newCtx)
+	streamv1alpha, err := cr.stubV1Alpha.ServerReflectionInfo(newCtx)
+	if err == nil {
+		cr.stream = adaptStreamFromV1Alpha{streamv1alpha}
+		return nil
+	}
 	return err
 }
 
+func (cr *Client) useV1() bool {
+	return !cr.useV1Alpha && cr.stubV1 != nil
+}
+
 // Reset ensures that any active stream with the server is closed, releasing any
 // resources.
 func (cr *Client) Reset() {
@@ -498,7 +793,7 @@
 
 func (cr *Client) resetLocked() {
 	if cr.stream != nil {
-		cr.stream.CloseSend()
+		_ = cr.stream.CloseSend()
 		for {
 			// drain the stream, this covers io.EOF too
 			if _, err := cr.stream.Recv(); err != nil {
@@ -605,6 +900,46 @@
 	}
 }
 
+func fileWithoutDeps(fd *descriptorpb.FileDescriptorProto, missingDeps []int) *descriptorpb.FileDescriptorProto {
+	// We need to rebuild the file without the missing deps.
+	fd = proto.Clone(fd).(*descriptorpb.FileDescriptorProto)
+	newNumDeps := len(fd.GetDependency()) - len(missingDeps)
+	newDeps := make([]string, 0, newNumDeps)
+	remapped := make(map[int]int, newNumDeps)
+	missingIdx := 0
+	for i, dep := range fd.GetDependency() {
+		if missingIdx < len(missingDeps) {
+			if i == missingDeps[missingIdx] {
+				// This dep was missing. Skip it.
+				missingIdx++
+				continue
+			}
+		}
+		remapped[i] = len(newDeps)
+		newDeps = append(newDeps, dep)
+	}
+	// Also rebuild public and weak import slices.
+	newPublic := make([]int32, 0, len(fd.GetPublicDependency()))
+	for _, idx := range fd.GetPublicDependency() {
+		newIdx, ok := remapped[int(idx)]
+		if ok {
+			newPublic = append(newPublic, int32(newIdx))
+		}
+	}
+	newWeak := make([]int32, 0, len(fd.GetWeakDependency()))
+	for _, idx := range fd.GetWeakDependency() {
+		newIdx, ok := remapped[int(idx)]
+		if ok {
+			newWeak = append(newWeak, int32(newIdx))
+		}
+	}
+
+	fd.Dependency = newDeps
+	fd.PublicDependency = newPublic
+	fd.WeakDependency = newWeak
+	return fd
+}
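
The dependency pruning above also remaps index-based references. A standalone sketch of the same remapping idea on plain slices may make the bookkeeping easier to follow; the names here are illustrative, not the vendored API.

```go
// dropDeps mirrors the index-remapping logic of fileWithoutDeps on plain
// slices: entries at the (sorted) missing indexes are removed, and surviving
// index-based references are rewritten against the compacted slice.
func dropDeps(deps []string, refs []int32, missing []int) ([]string, []int32) {
	newDeps := make([]string, 0, len(deps)-len(missing))
	remapped := make(map[int]int, len(deps))
	mi := 0
	for i, d := range deps {
		if mi < len(missing) && i == missing[mi] {
			mi++ // this dependency is missing; drop it
			continue
		}
		remapped[i] = len(newDeps)
		newDeps = append(newDeps, d)
	}
	newRefs := make([]int32, 0, len(refs))
	for _, r := range refs {
		if ni, ok := remapped[int(r)]; ok {
			newRefs = append(newRefs, int32(ni))
		}
	}
	return newDeps, newRefs
}

// e.g. dropDeps([]string{"a.proto", "b.proto", "c.proto"}, []int32{0, 2}, []int{1})
// returns ([]string{"a.proto", "c.proto"}, []int32{0, 1}).
```
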
+
 func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor {
 	// search extensions in this scope
 	for _, ext := range scope.extensions() {
@@ -664,3 +999,20 @@
 	}
 	return scopes
 }
+
+type adaptStreamFromV1Alpha struct {
+	refv1alpha.ServerReflection_ServerReflectionInfoClient
+}
+
+func (a adaptStreamFromV1Alpha) Send(request *refv1.ServerReflectionRequest) error {
+	v1req := toV1AlphaRequest(request)
+	return a.ServerReflection_ServerReflectionInfoClient.Send(v1req)
+}
+
+func (a adaptStreamFromV1Alpha) Recv() (*refv1.ServerReflectionResponse, error) {
+	v1resp, err := a.ServerReflection_ServerReflectionInfoClient.Recv()
+	if err != nil {
+		return nil, err
+	}
+	return toV1Response(v1resp), nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
index c9ef619..7ff1912 100644
--- a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -4,13 +4,19 @@
 	"fmt"
 
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/reflection"
 
 	"github.com/jhump/protoreflect/desc"
 )
 
+// GRPCServer is the interface provided by a gRPC server. In addition to being a
+// service registrar (for registering services and handlers), it also has an
+// accessor for retrieving metadata about all registered services.
+type GRPCServer = reflection.GRPCServer
+
 // LoadServiceDescriptors loads the service descriptors for all services exposed by the
 // given GRPC server.
-func LoadServiceDescriptors(s *grpc.Server) (map[string]*desc.ServiceDescriptor, error) {
+func LoadServiceDescriptors(s GRPCServer) (map[string]*desc.ServiceDescriptor, error) {
 	descs := map[string]*desc.ServiceDescriptor{}
 	for name, info := range s.GetServiceInfo() {
 		file, ok := info.Metadata.(string)
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
index 4a8b47a..777c3a4 100644
--- a/vendor/github.com/jhump/protoreflect/internal/standard_files.go
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -9,7 +9,7 @@
 	"io/ioutil"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 )
 
 // TODO: replace this alias configuration with desc.RegisterImportPath?
@@ -68,7 +68,7 @@
 // name cannot be loaded but is a known standard name, an alias will be tried,
 // so the standard files can be loaded even if linked against older "known bad"
 // versions of packages.
-func LoadFileDescriptor(file string) (*dpb.FileDescriptorProto, error) {
+func LoadFileDescriptor(file string) (*descriptorpb.FileDescriptorProto, error) {
 	fdb := proto.FileDescriptor(file)
 	aliased := false
 	if fdb == nil {
@@ -102,12 +102,12 @@
 // Registered file descriptors are first "proto encoded" (e.g. binary format
 // for the descriptor protos) and then gzipped. So this function gunzips and
 // then unmarshals into a descriptor proto.
-func DecodeFileDescriptor(element string, fdb []byte) (*dpb.FileDescriptorProto, error) {
+func DecodeFileDescriptor(element string, fdb []byte) (*descriptorpb.FileDescriptorProto, error) {
 	raw, err := decompress(fdb)
 	if err != nil {
 		return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
 	}
-	fd := dpb.FileDescriptorProto{}
+	fd := descriptorpb.FileDescriptorProto{}
 	if err := proto.Unmarshal(raw, &fd); err != nil {
 		return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
 	}
diff --git a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
index c903d4b..25376c7 100644
--- a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
+++ b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
@@ -1,86 +1,20 @@
 package internal
 
 import (
-	"reflect"
-
 	"github.com/golang/protobuf/proto"
 )
 
-var typeOfBytes = reflect.TypeOf([]byte(nil))
-
 // GetUnrecognized fetches the bytes of unrecognized fields for the given message.
 func GetUnrecognized(msg proto.Message) []byte {
-	val := reflect.Indirect(reflect.ValueOf(msg))
-	u := val.FieldByName("XXX_unrecognized")
-	if u.IsValid() && u.Type() == typeOfBytes {
-		return u.Interface().([]byte)
-	}
-
-	// Fallback to reflection for API v2 messages
-	get, _, _, ok := unrecognizedGetSetMethods(val)
-	if !ok {
-		return nil
-	}
-
-	return get.Call([]reflect.Value(nil))[0].Convert(typeOfBytes).Interface().([]byte)
+	return proto.MessageReflect(msg).GetUnknown()
 }
 
 // SetUnrecognized adds the given bytes to the unrecognized fields for the given message.
 func SetUnrecognized(msg proto.Message, data []byte) {
-	val := reflect.Indirect(reflect.ValueOf(msg))
-	u := val.FieldByName("XXX_unrecognized")
-	if u.IsValid() && u.Type() == typeOfBytes {
-		// Just store the bytes in the unrecognized field
-		ub := u.Interface().([]byte)
-		ub = append(ub, data...)
-		u.Set(reflect.ValueOf(ub))
-		return
-	}
-
-	// Fallback to reflection for API v2 messages
-	get, set, argType, ok := unrecognizedGetSetMethods(val)
-	if !ok {
-		return
-	}
-
-	existing := get.Call([]reflect.Value(nil))[0].Convert(typeOfBytes).Interface().([]byte)
+	refl := proto.MessageReflect(msg)
+	existing := refl.GetUnknown()
 	if len(existing) > 0 {
 		data = append(existing, data...)
 	}
-	set.Call([]reflect.Value{reflect.ValueOf(data).Convert(argType)})
-}
-
-func unrecognizedGetSetMethods(val reflect.Value) (get reflect.Value, set reflect.Value, argType reflect.Type, ok bool) {
-	// val could be an APIv2 message. We use reflection to interact with
-	// this message so that we don't have a hard dependency on the new
-	// version of the protobuf package.
-	refMethod := val.MethodByName("ProtoReflect")
-	if !refMethod.IsValid() {
-		if val.CanAddr() {
-			refMethod = val.Addr().MethodByName("ProtoReflect")
-		}
-		if !refMethod.IsValid() {
-			return
-		}
-	}
-	refType := refMethod.Type()
-	if refType.NumIn() != 0 || refType.NumOut() != 1 {
-		return
-	}
-	ref := refMethod.Call([]reflect.Value(nil))
-	getMethod, setMethod := ref[0].MethodByName("GetUnknown"), ref[0].MethodByName("SetUnknown")
-	if !getMethod.IsValid() || !setMethod.IsValid() {
-		return
-	}
-	getType := getMethod.Type()
-	setType := setMethod.Type()
-	if getType.NumIn() != 0 || getType.NumOut() != 1 || setType.NumIn() != 1 || setType.NumOut() != 0 {
-		return
-	}
-	arg := setType.In(0)
-	if !arg.ConvertibleTo(typeOfBytes) || getType.Out(0) != arg {
-		return
-	}
-
-	return getMethod, setMethod, arg, true
+	refl.SetUnknown(data)
 }
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index 0af08e6..4528059 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -1,9 +1,8 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
 before:
   hooks:
     - ./gen.sh
-    - go install mvdan.cc/garble@latest
 
 builds:
   -
@@ -32,7 +31,6 @@
       - mips64le
     goarm:
       - 7
-    gobinary: garble
   -
     id: "s2d"
     binary: s2d
@@ -59,7 +57,6 @@
       - mips64le
     goarm:
       - 7
-    gobinary: garble
   -
     id: "s2sx"
     binary: s2sx
@@ -87,21 +84,11 @@
       - mips64le
     goarm:
       - 7
-    gobinary: garble
 
 archives:
   -
     id: s2-binaries
-    name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
-    replacements:
-      aix: AIX
-      darwin: OSX
-      linux: Linux
-      windows: Windows
-      386: i386
-      amd64: x86_64
-      freebsd: FreeBSD
-      netbsd: NetBSD
+    name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
     format_overrides:
       - goos: windows
         format: zip
@@ -112,7 +99,7 @@
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ .Tag }}-next"
+  version_template: "{{ .Tag }}-next"
 changelog:
   sort: asc
   filters:
@@ -125,7 +112,7 @@
 
 nfpms:
   -
-    file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+    file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
     vendor: Klaus Post
     homepage: https://github.com/klauspost/compress
     maintainer: Klaus Post <klauspost@gmail.com>
@@ -134,8 +121,3 @@
     formats:
       - deb
       - rpm
-    replacements:
-      darwin: Darwin
-      linux: Linux
-      freebsd: FreeBSD
-      amd64: x86_64
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index ad5c63a..244ee19 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -9,14 +9,192 @@
 * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.

 * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.

 * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.

-* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.

 

 [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)

 [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)

 [![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)

 

+# package usage

+

+Use `go get github.com/klauspost/compress@latest` to add it to your project.

+

+This package will support the current Go version and 2 versions back.

+

+* Use the `nounsafe` tag to disable all use of the "unsafe" package.

+* Use the `noasm` tag to disable all assembly across packages.

+

+Use the links above for more information on each.

+

 # changelog

 

+* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)

+  * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036

+  * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028

+  * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043

+  * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045

+  * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048

+  * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049

+  * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050

+

+* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)

+  * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017

+  * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014

+  * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011

+  * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013

+

+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)

+	* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978

+	* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002

+	* s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982

+	* zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007

+	* flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996

+

+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)

+	* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949

+	* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963

+	* Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971

+	* zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951

+

+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)

+	* zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885

+	* zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938

+

+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)

+	* s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927

+	* s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930

+  

+* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)

+	* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923

+	* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925

+  

+* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)

+	* flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912

+	* zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908

+	* zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913

+	* zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910

+	* s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917

+https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918

+

+* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4)

+	* huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887

+	* huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886

+	* gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892

+	* gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890

+	* gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891

+

+* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3)

+	* fse: Fix max header size https://github.com/klauspost/compress/pull/881

+	* zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877

+	* gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883

+

+* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)

+	* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876

+

+* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)

+	* s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871

+	* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869

+	* s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867

+

+* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)

+	* Add experimental dictionary builder  https://github.com/klauspost/compress/pull/853

+	* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838

+	* flate: Add limited window compression https://github.com/klauspost/compress/pull/843

+	* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839

+	* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837

+	* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860

+

+<details>

+	<summary>See changes to v1.16.x</summary>

+

+   

+* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)

+	* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829

+	* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832

+

+* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)

+	* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806

+	* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824

+	* gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815

+	* s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663

+

+* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)

+	* zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802

+	* gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804

+

+* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)

+	* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784

+	* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792

+	* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785

+	* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795

+	* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779

+	* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780

+	* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799

+

+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)

+	* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776

+	* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767

+	* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766

+	* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773

+	* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774

+

+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)

+	* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support.  https://github.com/klauspost/compress/pull/685

+	* s2: Add Compression Size Estimate.  https://github.com/klauspost/compress/pull/752

+	* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755

+	* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748

+	* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747

+	* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746

+</details>

+

+<details>

+	<summary>See changes to v1.15.x</summary>

+	

+* Jan 21st, 2023 (v1.15.15)

+	* deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739

+	* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728

+	* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745

+	* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740

+

+* Jan 3rd, 2023 (v1.15.14)

+

+	* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718

+	* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720

+	* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722

+	* s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723

+

+* Dec 11, 2022 (v1.15.13)

+	* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder  https://github.com/klauspost/compress/pull/691

+	* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708

+

+* Oct 26, 2022 (v1.15.12)

+

+	* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680

+	* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683

+

+* Sept 26, 2022 (v1.15.11)

+

+	* flate: Improve level 1-3 compression  https://github.com/klauspost/compress/pull/678

+	* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677

+	* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668

+	* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667

+

+* Sept 16, 2022 (v1.15.10)

+

+	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649

+	* Add Go 1.19 - deprecate Go 1.16  https://github.com/klauspost/compress/pull/651

+	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656

+	* zstd: Improve "better" compression  https://github.com/klauspost/compress/pull/657

+	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658

+	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635

+	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646

+	* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659

+

+* July 21, 2022 (v1.15.9)

+

+	* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645

+	* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644

+	* zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643

+

 * July 13, 2022 (v1.15.8)

 

 	* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641

@@ -57,7 +235,7 @@
 	* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599

 	* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593

 	* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586

-	* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590

+	* flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590

 

 

 * May 11, 2022 (v1.15.4)

@@ -84,27 +262,29 @@
 	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)

 

 * Mar 3, 2022 (v1.15.0)

-	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)

-	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)

+	* zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)

+	* zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)

 	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)

-	* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)

-	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)

-	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)

+	* flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)

+	* gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)

+	* gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)

 

-<details>

-	<summary>See  Details</summary>

 Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.

 

 Stream decompression is now faster when run asynchronously, since the goroutine allocation much more effectively splits the workload. Typical streams will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.

 

 While the release has been extensively tested, it is recommended to test when upgrading.

+

 </details>

 

+<details>

+	<summary>See changes to v1.14.x</summary>

+	

 * Feb 22, 2022 (v1.14.4)

 	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)

 	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)

 	* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)  #501

-	* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)

+	* huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)

 

 * Feb 17, 2022 (v1.14.3)

 	* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494)  [#478](https://github.com/klauspost/compress/pull/478)

@@ -125,6 +305,7 @@
 	* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)

 	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)

 	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)

+</details>

 

 <details>

 	<summary>See changes to v1.13.x</summary>

@@ -205,7 +386,7 @@
 	* s2: Fix binaries.

 

 * Feb 25, 2021 (v1.11.8)

-	* s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.

+	* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.

 	* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)

 	* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)

 	* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)

@@ -384,7 +565,7 @@
 * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.

 * Feb 19, 2016: Handle small payloads faster in level 1-3.

 * Feb 19, 2016: Added faster level 2 + 3 compression modes.

-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.

+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.

 * Feb 14, 2016: Snappy: Merge upstream changes. 

 * Feb 14, 2016: Snappy: Fix aggressive skipping.

 * Feb 14, 2016: Snappy: Update benchmark.

@@ -410,12 +591,14 @@
 

 The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:

 

-| old import         | new import                              | Documentation

-|--------------------|-----------------------------------------|--------------------|

-| `compress/gzip`    | `github.com/klauspost/compress/gzip`    | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)

-| `compress/zlib`    | `github.com/klauspost/compress/zlib`    | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)

-| `archive/zip`      | `github.com/klauspost/compress/zip`     | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)

-| `compress/flate`   | `github.com/klauspost/compress/flate`   | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)

+Typical speed is about 2x of the standard library packages.

+

+| old import       | new import                            | Documentation                                                           |

+|------------------|---------------------------------------|-------------------------------------------------------------------------|

+| `compress/gzip`  | `github.com/klauspost/compress/gzip`  | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)   |

+| `compress/zlib`  | `github.com/klauspost/compress/zlib`  | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)   |

+| `archive/zip`    | `github.com/klauspost/compress/zip`   | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)     |

+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
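
Not part of the README diff: a minimal sketch of the drop-in swap the table describes, changing only the import path while keeping the standard-library API.

```go
package main

import (
	"bytes"

	// Same API as the standard library's "compress/gzip"; only the path changes.
	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	_, _ = w.Write([]byte("hello world"))
	_ = w.Close()
	_ = buf.Len() // compressed size in bytes
}
```
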

 

 * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).

 

@@ -431,6 +614,8 @@
 

 For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).

 

+To disable all assembly add `-tags=noasm`. This works across all packages.

+

 # Stateless compression

 

 This package offers stateless compression as a special option for gzip/deflate. 

@@ -449,7 +634,7 @@
 

 A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:

 

-```

+```go

 	// replace 'ioutil.Discard' with your output.

 	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)

 	if err != nil {

@@ -468,84 +653,6 @@
 Compression is almost always worse than the fastest compression level 

 and each write will allocate (a little) memory. 

 

-# Performance Update 2018

-

-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.

-

-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.

-

-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.

-

-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).

-

-

-## Overall differences.

-

-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.

-

-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.

-

-This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.

-

-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.

-

-## Web Content

-

-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.

-

-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.

-

-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.

-

-## Object files

-

-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.

-

-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. 

-

-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.

-

-## Highly Compressible File

-

-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.

-

-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.

-

-So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".

-

-## Medium-High Compressible

-

-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.

-

-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.

-

-## Medium Compressible

-

-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.

-

-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.

-

-

-## Un-compressible Content

-

-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed.  The only downside is that it might skip some compressible data on false detections.

-

-

-## Huffman only compression

-

-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. 

-

-This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).

-

-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.

-

-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). 

-

-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.

-

-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).

-

-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.

 

 # Other packages

 

@@ -554,6 +661,10 @@
 * [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.

 * [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.

 * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.

+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.

+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.

+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.

+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.

 

 # license

 

diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 0000000..ca6685e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory even with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt, you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 0000000..af53fb8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,1017 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	NoCompression      = 0
+	BestSpeed          = 1
+	BestCompression    = 9
+	DefaultCompression = -1
+
+	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+	// entropy encoding. This mode is useful in compressing data that has
+	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+	// that lacks an entropy encoder. Compression gains are achieved when
+	// certain bytes in the input stream occur more frequently than others.
+	//
+	// Note that HuffmanOnly produces a compressed output that is
+	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+	// continue to be able to decompress this output.
+	HuffmanOnly         = -2
+	ConstantCompression = HuffmanOnly // compatibility alias.
+
+	logWindowSize    = 15
+	windowSize       = 1 << logWindowSize
+	windowMask       = windowSize - 1
+	logMaxOffsetSize = 15  // Standard DEFLATE
+	minMatchLength   = 4   // The smallest match that the compressor looks for
+	maxMatchLength   = 258 // The longest match for the compressor
+	minOffsetSize    = 1   // The shortest offset that makes any sense
+
+	// The maximum number of tokens we will encode at a time.
+	// Smaller sizes usually create less optimal blocks.
+	// Bigger can make context switching slow.
+	// We use this for levels 7-9, so we make it big.
+	maxFlateBlockTokens = 1 << 15
+	maxStoreBlockSize   = 65535
+	hashBits            = 17 // After 17 performance degrades
+	hashSize            = 1 << hashBits
+	hashMask            = (1 << hashBits) - 1
+	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
+	maxHashOffset       = 1 << 28
+
+	skipNever = math.MaxInt32
+
+	debugDeflate = false
+)
+
+type compressionLevel struct {
+	good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+	{}, // 0
+	// Levels 1-6 use a specialized algorithm - values not used
+	{0, 0, 0, 0, 0, 1},
+	{0, 0, 0, 0, 0, 2},
+	{0, 0, 0, 0, 0, 3},
+	{0, 0, 0, 0, 0, 4},
+	{0, 0, 0, 0, 0, 5},
+	{0, 0, 0, 0, 0, 6},
+	// Levels 7-9 use increasingly more lazy matching
+	// and increasingly stringent conditions for "good enough".
+	{8, 12, 16, 24, skipNever, 7},
+	{16, 30, 40, 64, skipNever, 8},
+	{32, 258, 258, 1024, skipNever, 9},
+}
+
+// advancedState contains state for the advanced levels, with bigger hash tables, etc.
+type advancedState struct {
+	// deflate state
+	length         int
+	offset         int
+	maxInsertIndex int
+	chainHead      int
+	hashOffset     int
+
+	ii uint16 // position of last match, intended to overflow to reset.
+
+	// input window: unprocessed data is window[index:windowEnd]
+	index     int
+	hashMatch [maxMatchLength + minMatchLength]uint32
+
+	// Input hash chains
+	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
+	// If hashHead[hashValue] is within the current window, then
+	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+	// with the same hash value.
+	hashHead [hashSize]uint32
+	hashPrev [windowSize]uint32
+}
+
+type compressor struct {
+	compressionLevel
+
+	h *huffmanEncoder
+	w *huffmanBitWriter
+
+	// compression algorithm
+	fill func(*compressor, []byte) int // copy data to window
+	step func(*compressor)             // process window
+
+	window     []byte
+	windowEnd  int
+	blockStart int // window index where current tokens start
+	err        error
+
+	// queued output tokens
+	tokens tokens
+	fast   fastEnc
+	state  *advancedState
+
+	sync          bool // requesting flush
+	byteAvailable bool // if true, still need to process window[index-1].
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+	s := d.state
+	if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+		// shift the window by windowSize
+		//copy(d.window[:], d.window[windowSize:2*windowSize])
+		*(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
+		s.index -= windowSize
+		d.windowEnd -= windowSize
+		if d.blockStart >= windowSize {
+			d.blockStart -= windowSize
+		} else {
+			d.blockStart = math.MaxInt32
+		}
+		s.hashOffset += windowSize
+		if s.hashOffset > maxHashOffset {
+			delta := s.hashOffset - 1
+			s.hashOffset -= delta
+			s.chainHead -= delta
+			// Iterate over slices instead of arrays to avoid copying
+			// the entire table onto the stack (Issue #18625).
+			for i, v := range s.hashPrev[:] {
+				if int(v) > delta {
+					s.hashPrev[i] = uint32(int(v) - delta)
+				} else {
+					s.hashPrev[i] = 0
+				}
+			}
+			for i, v := range s.hashHead[:] {
+				if int(v) > delta {
+					s.hashHead[i] = uint32(int(v) - delta)
+				} else {
+					s.hashHead[i] = 0
+				}
+			}
+		}
+	}
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		var window []byte
+		if d.blockStart <= index {
+			window = d.window[d.blockStart:index]
+		}
+		d.blockStart = index
+		//d.w.writeBlock(tok, eof, window)
+		d.w.writeBlockDynamic(tok, eof, window, d.sync)
+		return d.w.err
+	}
+	return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine if the block should be stored on no matches, or
+// only huffman encoded.
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		if d.blockStart <= index {
+			window := d.window[d.blockStart:index]
+			// If we removed less than a 64th of all literals
+			// we huffman compress the block.
+			if int(tok.n) > len(window)-int(tok.n>>6) {
+				d.w.writeBlockHuff(eof, window, d.sync)
+			} else {
+				// Write a dynamic huffman block.
+				d.w.writeBlockDynamic(tok, eof, window, d.sync)
+			}
+		} else {
+			d.w.writeBlock(tok, eof, nil)
+		}
+		d.blockStart = index
+		return d.w.err
+	}
+	return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+	// Do not fill window if we are in store-only or huffman mode.
+	if d.level <= 0 && d.level > -MinCustomWindowSize {
+		return
+	}
+	if d.fast != nil {
+		// encode the last data, but discard the result
+		if len(b) > maxMatchOffset {
+			b = b[len(b)-maxMatchOffset:]
+		}
+		d.fast.Encode(&d.tokens, b)
+		d.tokens.Reset()
+		return
+	}
+	s := d.state
+	// If we are given too much, cut it.
+	if len(b) > windowSize {
+		b = b[len(b)-windowSize:]
+	}
+	// Add all to window.
+	n := copy(d.window[d.windowEnd:], b)
+
+	// Calculate 256 hashes at a time (more L1 cache hits)
+	loops := (n + 256 - minMatchLength) / 256
+	for j := 0; j < loops; j++ {
+		startindex := j * 256
+		end := startindex + 256 + minMatchLength - 1
+		if end > n {
+			end = n
+		}
+		tocheck := d.window[startindex:end]
+		dstSize := len(tocheck) - minMatchLength + 1
+
+		if dstSize <= 0 {
+			continue
+		}
+
+		dst := s.hashMatch[:dstSize]
+		bulkHash4(tocheck, dst)
+		var newH uint32
+		for i, val := range dst {
+			di := i + startindex
+			newH = val & hashMask
+			// Get previous value with the same hash.
+			// Our chain should point to the previous value.
+			s.hashPrev[di&windowMask] = s.hashHead[newH]
+			// Set the head of the hash chain to us.
+			s.hashHead[newH] = uint32(di + s.hashOffset)
+		}
+	}
+	// Update window information.
+	d.windowEnd += n
+	s.index = n
+}
+
+// findMatch tries to find the best match starting at pos.
+// We only look at d.chain possibilities before giving up.
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
+	minMatchLook := maxMatchLength
+	if lookahead < minMatchLook {
+		minMatchLook = lookahead
+	}
+
+	win := d.window[0 : pos+minMatchLook]
+
+	// We quit when we get a match that's at least nice long
+	nice := len(win) - pos
+	if d.nice < nice {
+		nice = d.nice
+	}
+
+	// If we've got a match that's good enough, only look in 1/4 the chain.
+	tries := d.chain
+	length = minMatchLength - 1
+
+	wEnd := win[pos+length]
+	wPos := win[pos:]
+	minIndex := pos - windowSize
+	if minIndex < 0 {
+		minIndex = 0
+	}
+	offset = 0
+
+	if d.chain < 100 {
+		for i := prevHead; tries > 0; tries-- {
+			if wEnd == win[i+length] {
+				n := matchLen(win[i:i+minMatchLook], wPos)
+				if n > length {
+					length = n
+					offset = pos - i
+					ok = true
+					if n >= nice {
+						// The match is good enough that we don't try to find a better one.
+						break
+					}
+					wEnd = win[pos+n]
+				}
+			}
+			if i <= minIndex {
+				// hashPrev[i & windowMask] has already been overwritten, so stop now.
+				break
+			}
+			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+			if i < minIndex {
+				break
+			}
+		}
+		return
+	}
+
+	// Minimum gain to accept a match.
+	cGain := 4
+
+	// Some like it higher (CSV), some like it lower (JSON)
+	const baseCost = 3
+	// Base is 4 bytes with an additional cost.
+	// Matches must be better than this.
+
+	for i := prevHead; tries > 0; tries-- {
+		if wEnd == win[i+length] {
+			n := matchLen(win[i:i+minMatchLook], wPos)
+			if n > length {
+				// Calculate gain. Estimate
+				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
+
+				//fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
+				if newGain > cGain {
+					length = n
+					offset = pos - i
+					cGain = newGain
+					ok = true
+					if n >= nice {
+						// The match is good enough that we don't try to find a better one.
+						break
+					}
+					wEnd = win[pos+n]
+				}
+			}
+		}
+		if i <= minIndex {
+			// hashPrev[i & windowMask] has already been overwritten, so stop now.
+			break
+		}
+		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+		if i < minIndex {
+			break
+		}
+	}
+	return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+		return d.w.err
+	}
+	d.w.writeBytes(buf)
+	return d.w.err
+}
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+	return hash4u(binary.LittleEndian.Uint32(b), hashBits)
+}
+
+// hash4u returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+	return (u * prime4bytes) >> (32 - h)
+}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4
+func bulkHash4(b []byte, dst []uint32) {
+	if len(b) < 4 {
+		return
+	}
+	hb := binary.LittleEndian.Uint32(b)
+
+	dst[0] = hash4u(hb, hashBits)
+	end := len(b) - 4 + 1
+	for i := 1; i < end; i++ {
+		hb = (hb >> 8) | uint32(b[i+3])<<24
+		dst[i] = hash4u(hb, hashBits)
+	}
+}
+
+func (d *compressor) initDeflate() {
+	d.window = make([]byte, 2*windowSize)
+	d.byteAvailable = false
+	d.err = nil
+	if d.state == nil {
+		return
+	}
+	s := d.state
+	s.index = 0
+	s.hashOffset = 1
+	s.length = minMatchLength - 1
+	s.offset = 0
+	s.chainHead = -1
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazy() {
+	s := d.state
+	// Sanity enables additional runtime tests.
+	// It's intended to be used during development
+	// to supplement the currently ad-hoc unit tests.
+	const sanity = debugDeflate
+
+	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
+		return
+	}
+	if d.windowEnd != s.index && d.chain > 100 {
+		// Get literal huffman coder.
+		if d.h == nil {
+			d.h = newHuffmanEncoder(maxFlateBlockTokens)
+		}
+		var tmp [256]uint16
+		for _, v := range d.window[s.index:d.windowEnd] {
+			tmp[v]++
+		}
+		d.h.generate(tmp[:], 15)
+	}
+
+	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+
+	for {
+		if sanity && s.index > d.windowEnd {
+			panic("index > windowEnd")
+		}
+		lookahead := d.windowEnd - s.index
+		if lookahead < minMatchLength+maxMatchLength {
+			if !d.sync {
+				return
+			}
+			if sanity && s.index > d.windowEnd {
+				panic("index > windowEnd")
+			}
+			if lookahead == 0 {
+				// Flush current output block if any.
+				if d.byteAvailable {
+					// There is still one pending token that needs to be flushed
+					d.tokens.AddLiteral(d.window[s.index-1])
+					d.byteAvailable = false
+				}
+				if d.tokens.n > 0 {
+					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+						return
+					}
+					d.tokens.Reset()
+				}
+				return
+			}
+		}
+		if s.index < s.maxInsertIndex {
+			// Update the hash
+			hash := hash4(d.window[s.index:])
+			ch := s.hashHead[hash]
+			s.chainHead = int(ch)
+			s.hashPrev[s.index&windowMask] = ch
+			s.hashHead[hash] = uint32(s.index + s.hashOffset)
+		}
+		prevLength := s.length
+		prevOffset := s.offset
+		s.length = minMatchLength - 1
+		s.offset = 0
+		minIndex := s.index - windowSize
+		if minIndex < 0 {
+			minIndex = 0
+		}
+
+		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
+				s.length = newLength
+				s.offset = newOffset
+			}
+		}
+
+		if prevLength >= minMatchLength && s.length <= prevLength {
+			// No better match, but check for better match at end...
+			//
+			// Skip forward a number of bytes.
+			// Offset of 2 seems to yield best results. 3 is sometimes better.
+			const checkOff = 2
+
+			// Check all, except full length
+			if prevLength < maxMatchLength-checkOff {
+				prevIndex := s.index - 1
+				if prevIndex+prevLength < s.maxInsertIndex {
+					end := lookahead
+					if lookahead > maxMatchLength+checkOff {
+						end = maxMatchLength + checkOff
+					}
+					end += prevIndex
+
+					// Hash at match end.
+					h := hash4(d.window[prevIndex+prevLength:])
+					ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
+					if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
+						length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
+						// It seems like a pure length metric is best.
+						if length > prevLength {
+							prevLength = length
+							prevOffset = prevIndex - ch2
+
+							// Extend back...
+							for i := checkOff - 1; i >= 0; i-- {
+								if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
+									// Emit tokens we "owe"
+									for j := 0; j <= i; j++ {
+										d.tokens.AddLiteral(d.window[prevIndex+j])
+										if d.tokens.n == maxFlateBlockTokens {
+											// The block includes the current character
+											if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+												return
+											}
+											d.tokens.Reset()
+										}
+										s.index++
+										if s.index < s.maxInsertIndex {
+											h := hash4(d.window[s.index:])
+											ch := s.hashHead[h]
+											s.chainHead = int(ch)
+											s.hashPrev[s.index&windowMask] = ch
+											s.hashHead[h] = uint32(s.index + s.hashOffset)
+										}
+									}
+									break
+								} else {
+									prevLength++
+								}
+							}
+						} else if false {
+							// Check one further ahead.
+							// Only rarely better, disabled for now.
+							prevIndex++
+							h := hash4(d.window[prevIndex+prevLength:])
+							ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
+							if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
+								length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
+								// It seems like a pure length metric is best.
+								if length > prevLength+checkOff {
+									prevLength = length
+									prevOffset = prevIndex - ch2
+									prevIndex--
+
+									// Extend back...
+									for i := checkOff; i >= 0; i-- {
+										if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
+											// Emit tokens we "owe"
+											for j := 0; j <= i; j++ {
+												d.tokens.AddLiteral(d.window[prevIndex+j])
+												if d.tokens.n == maxFlateBlockTokens {
+													// The block includes the current character
+													if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+														return
+													}
+													d.tokens.Reset()
+												}
+												s.index++
+												if s.index < s.maxInsertIndex {
+													h := hash4(d.window[s.index:])
+													ch := s.hashHead[h]
+													s.chainHead = int(ch)
+													s.hashPrev[s.index&windowMask] = ch
+													s.hashHead[h] = uint32(s.index + s.hashOffset)
+												}
+											}
+											break
+										} else {
+											prevLength++
+										}
+									}
+								}
+							}
+						}
+					}
+				}
+			}
+			// There was a match at the previous step, and the current match is
+			// not better. Output the previous match.
+			d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+
+			// Insert in the hash table all strings up to the end of the match.
+			// index and index-1 are already inserted. If there is not enough
+			// lookahead, the last two strings are not inserted into the hash
+			// table.
+			newIndex := s.index + prevLength - 1
+			// Calculate missing hashes
+			end := newIndex
+			if end > s.maxInsertIndex {
+				end = s.maxInsertIndex
+			}
+			end += minMatchLength - 1
+			startindex := s.index + 1
+			if startindex > s.maxInsertIndex {
+				startindex = s.maxInsertIndex
+			}
+			tocheck := d.window[startindex:end]
+			dstSize := len(tocheck) - minMatchLength + 1
+			if dstSize > 0 {
+				dst := s.hashMatch[:dstSize]
+				bulkHash4(tocheck, dst)
+				var newH uint32
+				for i, val := range dst {
+					di := i + startindex
+					newH = val & hashMask
+					// Get previous value with the same hash.
+					// Our chain should point to the previous value.
+					s.hashPrev[di&windowMask] = s.hashHead[newH]
+					// Set the head of the hash chain to us.
+					s.hashHead[newH] = uint32(di + s.hashOffset)
+				}
+			}
+
+			s.index = newIndex
+			d.byteAvailable = false
+			s.length = minMatchLength - 1
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+					return
+				}
+				d.tokens.Reset()
+			}
+			s.ii = 0
+		} else {
+			// Reset, if we got a match this run.
+			if s.length >= minMatchLength {
+				s.ii = 0
+			}
+			// We have a byte waiting. Emit it.
+			if d.byteAvailable {
+				s.ii++
+				d.tokens.AddLiteral(d.window[s.index-1])
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+						return
+					}
+					d.tokens.Reset()
+				}
+				s.index++
+
+				// If we have a long run of no matches, skip additional bytes
+				// Resets when s.ii overflows after 64KB.
+				if n := int(s.ii) - d.chain; n > 0 {
+					n = 1 + int(n>>6)
+					for j := 0; j < n; j++ {
+						if s.index >= d.windowEnd-1 {
+							break
+						}
+						d.tokens.AddLiteral(d.window[s.index-1])
+						if d.tokens.n == maxFlateBlockTokens {
+							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+								return
+							}
+							d.tokens.Reset()
+						}
+						// Index...
+						if s.index < s.maxInsertIndex {
+							h := hash4(d.window[s.index:])
+							ch := s.hashHead[h]
+							s.chainHead = int(ch)
+							s.hashPrev[s.index&windowMask] = ch
+							s.hashHead[h] = uint32(s.index + s.hashOffset)
+						}
+						s.index++
+					}
+					// Flush last byte
+					d.tokens.AddLiteral(d.window[s.index-1])
+					d.byteAvailable = false
+					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
+					if d.tokens.n == maxFlateBlockTokens {
+						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+							return
+						}
+						d.tokens.Reset()
+					}
+				}
+			} else {
+				s.index++
+				d.byteAvailable = true
+			}
+		}
+	}
+}
+
+func (d *compressor) store() {
+	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		d.windowEnd = 0
+	}
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+		return
+	}
+	d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+	d.err = d.w.err
+	d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeFast() {
+	// We only compress if we have maxStoreBlockSize.
+	if d.windowEnd < len(d.window) {
+		if !d.sync {
+			return
+		}
+		// Handle extremely small sizes.
+		if d.windowEnd < 128 {
+			if d.windowEnd == 0 {
+				return
+			}
+			if d.windowEnd <= 32 {
+				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+			} else {
+				d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
+				d.err = d.w.err
+			}
+			d.tokens.Reset()
+			d.windowEnd = 0
+			d.fast.Reset()
+			return
+		}
+	}
+
+	d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
+	// If we made zero matches, store the block as is.
+	if d.tokens.n == 0 {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		// If we removed less than 1/16th, huffman compress the block.
+	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+		d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+		d.err = d.w.err
+	} else {
+		d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
+		d.err = d.w.err
+	}
+	d.tokens.Reset()
+	d.windowEnd = 0
+}
+
+// write will add input bytes to the stream.
+// Unless an error occurs all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+	if d.err != nil {
+		return 0, d.err
+	}
+	n = len(b)
+	for len(b) > 0 {
+		if d.windowEnd == len(d.window) || d.sync {
+			d.step(d)
+		}
+		b = b[d.fill(d, b):]
+		if d.err != nil {
+			return 0, d.err
+		}
+	}
+	return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+	d.sync = true
+	if d.err != nil {
+		return d.err
+	}
+	d.step(d)
+	if d.err == nil {
+		d.w.writeStoredHeader(0, false)
+		d.w.flush()
+		d.err = d.w.err
+	}
+	d.sync = false
+	return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+	d.w = newHuffmanBitWriter(w)
+
+	switch {
+	case level == NoCompression:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).store
+	case level == ConstantCompression:
+		d.w.logNewTablePenalty = 10
+		d.window = make([]byte, 32<<10)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeHuff
+	case level == DefaultCompression:
+		level = 5
+		fallthrough
+	case level >= 1 && level <= 6:
+		d.w.logNewTablePenalty = 7
+		d.fast = newFastEnc(level)
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeFast
+	case 7 <= level && level <= 9:
+		d.w.logNewTablePenalty = 8
+		d.state = &advancedState{}
+		d.compressionLevel = levels[level]
+		d.initDeflate()
+		d.fill = (*compressor).fillDeflate
+		d.step = (*compressor).deflateLazy
+	case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
+		d.w.logNewTablePenalty = 7
+		d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeFast
+	default:
+		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+	}
+	d.level = level
+	return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+	d.w.reset(w)
+	d.sync = false
+	d.err = nil
+	// We only need to reset a few things for Snappy.
+	if d.fast != nil {
+		d.fast.Reset()
+		d.windowEnd = 0
+		d.tokens.Reset()
+		return
+	}
+	switch d.compressionLevel.chain {
+	case 0:
+		// level was NoCompression or ConstantCompression.
+		d.windowEnd = 0
+	default:
+		s := d.state
+		s.chainHead = -1
+		for i := range s.hashHead {
+			s.hashHead[i] = 0
+		}
+		for i := range s.hashPrev {
+			s.hashPrev[i] = 0
+		}
+		s.hashOffset = 1
+		s.index, d.windowEnd = 0, 0
+		d.blockStart, d.byteAvailable = 0, false
+		d.tokens.Reset()
+		s.length = minMatchLength - 1
+		s.offset = 0
+		s.ii = 0
+		s.maxInsertIndex = 0
+	}
+}
+
+func (d *compressor) close() error {
+	if d.err != nil {
+		return d.err
+	}
+	d.sync = true
+	d.step(d)
+	if d.err != nil {
+		return d.err
+	}
+	if d.w.writeStoredHeader(0, true); d.w.err != nil {
+		return d.w.err
+	}
+	d.w.flush()
+	d.w.reset(nil)
+	return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more.
+// Level 0 (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+	var dw Writer
+	if err := dw.d.init(w, level); err != nil {
+		return nil, err
+	}
+	return &dw, nil
+}
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary.  The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output.  The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+	zw, err := NewWriter(w, level)
+	if err != nil {
+		return nil, err
+	}
+	zw.d.fillWindow(dict)
+	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+	return zw, err
+}
+
+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = 32
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = windowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+	if windowSize < MinCustomWindowSize {
+		return nil, errors.New("flate: requested window size less than MinWindowSize")
+	}
+	if windowSize > MaxCustomWindowSize {
+		return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
+	}
+	var dw Writer
+	if err := dw.d.init(w, -windowSize); err != nil {
+		return nil, err
+	}
+	return &dw, nil
+}
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+	d    compressor
+	dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+	return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+	// For more about flushing:
+	// http://www.bolet.org/~pornin/deflate-flush.html
+	return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+	return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+	if len(w.dict) > 0 {
+		// w was created with NewWriterDict
+		w.d.reset(dst)
+		if dst != nil {
+			w.d.fillWindow(w.dict)
+		}
+	} else {
+		// w was created with NewWriter
+		w.d.reset(dst)
+	}
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+	w.dict = dict
+	w.d.reset(dst)
+	w.d.fillWindow(w.dict)
+}
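
The Writer API above mirrors the standard library `flate` writer, with the rebalanced levels described earlier. A minimal, illustrative sketch of driving it (level and payload are arbitrary):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// Level 5 is what DefaultCompression (-1) maps to in init above.
	fw, err := flate.NewWriter(&buf, 5)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fw.Write([]byte("example payload, example payload")); err != nil {
		log.Fatal(err)
	}
	// Flush emits a sync marker so a reader can decode everything written so far.
	if err := fw.Flush(); err != nil {
		log.Fatal(err)
	}
	if err := fw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("compressed bytes:", buf.Len())
}
```

NewWriterWindow, also defined above, constructs the same Writer but with a caller-chosen match window.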
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
new file mode 100644
index 0000000..bb36351
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+//   - Literal insertions: Runs of one or more symbols are inserted into the data
+//     stream as is. This is accomplished through the writeByte method for a
+//     single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+//     Any valid stream must start with a literal insertion if no preset dictionary
+//     is used.
+//
+//   - Backward copies: Runs of one or more symbols are copied from previously
+//     emitted data. Backward copies come as the tuple (dist, length) where dist
+//     determines how far back in the stream to copy from and length determines how
+//     many bytes to copy. Note that it is valid for the length to be greater than
+//     the distance. Since LZ77 uses forward copies, that situation is used to
+//     perform a form of run-length encoding on repeated runs of symbols.
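+//     For example, if the last emitted byte is 'a', a copy with dist=1 and
+//     length=5 appends "aaaaa": each copied byte may itself have been
+//     produced earlier within the same copy.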
+//     The writeCopy and tryWriteCopy are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checks about the arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+	hist []byte // Sliding window history
+
+	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
+	wrPos int  // Current output position in buffer
+	rdPos int  // Have emitted hist[:rdPos] already
+	full  bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+	*dd = dictDecoder{hist: dd.hist}
+
+	if cap(dd.hist) < size {
+		dd.hist = make([]byte, size)
+	}
+	dd.hist = dd.hist[:size]
+
+	if len(dict) > len(dd.hist) {
+		dict = dict[len(dict)-len(dd.hist):]
+	}
+	dd.wrPos = copy(dd.hist, dict)
+	if dd.wrPos == len(dd.hist) {
+		dd.wrPos = 0
+		dd.full = true
+	}
+	dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+	if dd.full {
+		return len(dd.hist)
+	}
+	return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+	return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+	return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+	return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+	dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+	dd.hist[dd.wrPos] = c
+	dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+	dstBase := dd.wrPos
+	dstPos := dstBase
+	srcPos := dstPos - dist
+	endPos := dstPos + length
+	if endPos > len(dd.hist) {
+		endPos = len(dd.hist)
+	}
+
+	// Copy non-overlapping section after destination position.
+	//
+	// This section is non-overlapping in that the copy length for this section
+	// is always less than or equal to the backwards distance. This can occur
+	// if a distance refers to data that wraps-around in the buffer.
+	// Thus, a backwards copy is performed here; that is, the exact bytes in
+	// the source prior to the copy are placed in the destination.
+	if srcPos < 0 {
+		srcPos += len(dd.hist)
+		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+		srcPos = 0
+	}
+
+	// Copy possibly overlapping section before destination position.
+	//
+	// This section can overlap if the copy length for this section is larger
+	// than the backwards distance. This is allowed by LZ77 so that repeated
+	// strings can be succinctly represented using (dist, length) pairs.
+	// Thus, a forwards copy is performed here; that is, the bytes copied are
+	// possibly dependent on the resulting bytes in the destination as the copy
+	// progresses along. This is functionally equivalent to the following:
+	//
+	//	for i := 0; i < endPos-dstPos; i++ {
+	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
+	//	}
+	//	dstPos = endPos
+	//
+	for dstPos < endPos {
+		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+	}
+
+	dd.wrPos = dstPos
+	return dstPos - dstBase
+}
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+	dstPos := dd.wrPos
+	endPos := dstPos + length
+	if dstPos < dist || endPos > len(dd.hist) {
+		return 0
+	}
+	dstBase := dstPos
+	srcPos := dstPos - dist
+
+	// Copy possibly overlapping section before destination position.
+loop:
+	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+	if dstPos < endPos {
+		goto loop // Avoid for-loop so that this function can be inlined
+	}
+
+	dd.wrPos = dstPos
+	return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+	toRead := dd.hist[dd.rdPos:dd.wrPos]
+	dd.rdPos = dd.wrPos
+	if dd.wrPos == len(dd.hist) {
+		dd.wrPos, dd.rdPos = 0, 0
+		dd.full = true
+	}
+	return toRead
+}
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
new file mode 100644
index 0000000..0e8b163
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -0,0 +1,232 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"fmt"
+	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+type fastEnc interface {
+	Encode(dst *tokens, src []byte)
+	Reset()
+}
+
+func newFastEnc(level int) fastEnc {
+	switch level {
+	case 1:
+		return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 2:
+		return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 3:
+		return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 4:
+		return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 5:
+		return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 6:
+		return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
+	default:
+		panic("invalid level specified")
+	}
+}
+
+const (
+	tableBits       = 15             // Bits used in the table
+	tableSize       = 1 << tableBits // Size of the table
+	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+	baseMatchOffset = 1              // The smallest match offset
+	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
+	maxMatchOffset  = 1 << 15        // The largest match offset
+
+	bTableBits   = 17                                               // Bits used in the big tables
+	bTableSize   = 1 << bTableBits                                  // Size of the table
+	allocHistory = maxStoreBlockSize * 5                            // Size to preallocate for history.
+	bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
+)
+
+const (
+	prime3bytes = 506832829
+	prime4bytes = 2654435761
+	prime5bytes = 889523592379
+	prime6bytes = 227718039650203
+	prime7bytes = 58295818150454627
+	prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+func load3232(b []byte, i int32) uint32 {
+	return le.Load32(b, i)
+}
+
+func load6432(b []byte, i int32) uint64 {
+	return le.Load64(b, i)
+}
+
+type tableEntry struct {
+	offset int32
+}
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastGen struct {
+	hist []byte
+	cur  int32
+}
+
+func (e *fastGen) addBlock(src []byte) int32 {
+	// check if we have space already
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.hist = make([]byte, 0, allocHistory)
+		} else {
+			if cap(e.hist) < maxMatchOffset*2 {
+				panic("unexpected buffer size")
+			}
+			// Move down
+			offset := int32(len(e.hist)) - maxMatchOffset
+			// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+			*(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:maxMatchOffset]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+type tableEntryPrev struct {
+	Cur  tableEntry
+	Prev tableEntry
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
+}
+
+// hashLen returns a hash of the lowest mls bytes of u with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+	switch mls {
+	case 3:
+		return (uint32(u<<8) * prime3bytes) >> (32 - length)
+	case 5:
+		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+	case 6:
+		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+	case 7:
+		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+	case 8:
+		return uint32((u * prime8bytes) >> (64 - length))
+	default:
+		return (uint32(u) * prime4bytes) >> (32 - length)
+	}
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlen(s, t int, src []byte) int32 {
+	if debugDeflate {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+		}
+	}
+	s1 := min(s+maxMatchLength-4, len(src))
+	left := s1 - s
+	n := int32(0)
+	for left >= 8 {
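+		// Compare 8 bytes at a time: XOR the two words; with little-endian
+		// loads, trailing zero bits / 8 gives the index of the first
+		// mismatching byte.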
+		diff := le.Load64(src, s) ^ le.Load64(src, t)
+		if diff != 0 {
+			return n + int32(bits.TrailingZeros64(diff)>>3)
+		}
+		s += 8
+		t += 8
+		n += 8
+		left -= 8
+	}
+
+	a := src[s:s1]
+	b := src[t:]
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >=0 and s < len(src).
+func (e *fastGen) matchlenLong(s, t int, src []byte) int32 {
+	if debugDeflate {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+		}
+	}
+	// Extend the match to be as long as possible.
+	left := len(src) - s
+	n := int32(0)
+	for left >= 8 {
+		diff := le.Load64(src, s) ^ le.Load64(src, t)
+		if diff != 0 {
+			return n + int32(bits.TrailingZeros64(diff)>>3)
+		}
+		s += 8
+		t += 8
+		n += 8
+		left -= 8
+	}
+
+	a := src[s:]
+	b := src[t:]
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+	if cap(e.hist) < allocHistory {
+		e.hist = make([]byte, 0, allocHistory)
+	}
+	// We offset current position so everything will be out of reach.
+	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+	if e.cur <= bufferReset {
+		e.cur += maxMatchOffset + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 0000000..afdc8c0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,1183 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+const (
+	// The largest offset code.
+	offsetCodeCount = 30
+
+	// The special code used to mark the end of a block.
+	endBlockMarker = 256
+
+	// The first length code.
+	lengthCodesStart = 257
+
+	// The number of codegen codes.
+	codegenCodeCount = 19
+	badCode          = 255
+
+	// maxPredefinedTokens is the maximum number of tokens
+	// where we check if fixed size is smaller.
+	maxPredefinedTokens = 250
+
+	// bufferFlushSize indicates the buffer size
+	// after which bytes are flushed to the writer.
+	// Should preferably be a multiple of 6, since
+	// we accumulate 6 bytes between writes to the buffer.
+	bufferFlushSize = 246
+)
+
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
+// The number of extra bits needed by length code X - LENGTH_CODES_START.
+var lengthExtraBits = [32]uint8{
+	/* 257 */ 0, 0, 0,
+	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+	/* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - LENGTH_CODES_START.
+var lengthBase = [32]uint8{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+	64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
+// offset code word extra bits.
+var offsetExtraBits = [32]int8{
+	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+	/* extended window */
+	14, 14,
+}
+
+var offsetCombined = [32]uint32{}
+
+func init() {
+	var offsetBase = [32]uint32{
+		/* normal deflate */
+		0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+		0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+		0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+		0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+		0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+		0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+		/* extended window */
+		0x008000, 0x00c000,
+	}
+
+	for i := range offsetCombined[:] {
+		// Don't use extended window values...
+		if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
+			continue
+		}
+		offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
+	}
+}
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+type huffmanBitWriter struct {
+	// writer is the underlying writer.
+	// Do not use it directly; use the write method, which ensures
+	// that Write errors are sticky.
+	writer io.Writer
+
+	// Data waiting to be written is bytes[0:nbytes]
+	// and then the low nbits of bits.
+	bits            uint64
+	nbits           uint8
+	nbytes          uint8
+	lastHuffMan     bool
+	literalEncoding *huffmanEncoder
+	tmpLitEncoding  *huffmanEncoder
+	offsetEncoding  *huffmanEncoder
+	codegenEncoding *huffmanEncoder
+	err             error
+	lastHeader      int
+	// logNewTablePenalty is the shift used to penalize the estimated size of a new
+	// table (estimate += estimate >> logNewTablePenalty); at 0 a reused block may be
+	// up to 2x the optimal size before a new table is generated.
+	logNewTablePenalty uint
+	bytes              [256 + 8]byte
+	literalFreq        [lengthCodesStart + 32]uint16
+	offsetFreq         [32]uint16
+	codegenFreq        [codegenCodeCount]uint16
+
+	// codegen must have an extra space for the final symbol.
+	codegen [literalCount + offsetCodeCount + 1]uint8
+}
+
+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a 'fresh' (new) table by calculating the
+// optimal size and adding a penalty in 'logNewTablePenalty'.
+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
+// is slower both for compression and decompression.
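+//
+// Illustrative example (not part of the original comments): with logNewTablePenalty == 4,
+// a fresh table estimated at 1000 bits is charged an extra 1000 >> 4 = 62 bits, so the
+// existing table keeps being reused until it costs more than that penalized estimate.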
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+	return &huffmanBitWriter{
+		writer:          w,
+		literalEncoding: newHuffmanEncoder(literalCount),
+		tmpLitEncoding:  newHuffmanEncoder(literalCount),
+		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
+	}
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+	w.writer = writer
+	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+	w.lastHeader = 0
+	w.lastHuffMan = false
+}
+
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
+	a := t.offHist[:offsetCodeCount]
+	b := w.offsetEncoding.codes
+	b = b[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+
+	a = t.extraHist[:literalCount-256]
+	b = w.literalEncoding.codes[256:literalCount]
+	b = b[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+
+	a = t.litHist[:256]
+	b = w.literalEncoding.codes[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+	return true
+}
+
+func (w *huffmanBitWriter) flush() {
+	if w.err != nil {
+		w.nbits = 0
+		return
+	}
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+	n := w.nbytes
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		if w.nbits > 8 { // Avoid underflow
+			w.nbits -= 8
+		} else {
+			w.nbits = 0
+		}
+		n++
+	}
+	w.bits = 0
+	w.write(w.bytes[:n])
+	w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+	if w.err != nil {
+		return
+	}
+	_, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
+	w.bits |= uint64(b) << (w.nbits & 63)
+	w.nbits += nb
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+}
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+	if w.err != nil {
+		return
+	}
+	n := w.nbytes
+	if w.nbits&7 != 0 {
+		w.err = InternalError("writeBytes with unfinished bits")
+		return
+	}
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		w.nbits -= 8
+		n++
+	}
+	if n != 0 {
+		w.write(w.bytes[:n])
+	}
+	w.nbytes = 0
+	w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array).  This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code is written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker
+//
+//	numLiterals      The number of literals in literalEncoding
+//	numOffsets       The number of offsets in offsetEncoding
+//	litenc, offenc   The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+	for i := range w.codegenFreq {
+		w.codegenFreq[i] = 0
+	}
+	// Note that we are using codegen both as a temporary variable for holding
+	// a copy of the frequencies, and as the place where we put the result.
+	// This is fine because the output is always shorter than the input used
+	// so far.
+	codegen := w.codegen[:] // cache
+	// Copy the concatenated code sizes to codegen. Put a marker at the end.
+	cgnl := codegen[:numLiterals]
+	for i := range cgnl {
+		cgnl[i] = litEnc.codes[i].len()
+	}
+
+	cgnl = codegen[numLiterals : numLiterals+numOffsets]
+	for i := range cgnl {
+		cgnl[i] = offEnc.codes[i].len()
+	}
+	codegen[numLiterals+numOffsets] = badCode
+
+	size := codegen[0]
+	count := 1
+	outIndex := 0
+	for inIndex := 1; size != badCode; inIndex++ {
+		// INVARIANT: We have seen "count" copies of size that have not yet
+		// had output generated for them.
+		nextSize := codegen[inIndex]
+		if nextSize == size {
+			count++
+			continue
+		}
+		// We need to generate codegen indicating "count" of size.
+		if size != 0 {
+			codegen[outIndex] = size
+			outIndex++
+			w.codegenFreq[size]++
+			count--
+			for count >= 3 {
+				n := 6
+				if n > count {
+					n = count
+				}
+				codegen[outIndex] = 16
+				outIndex++
+				codegen[outIndex] = uint8(n - 3)
+				outIndex++
+				w.codegenFreq[16]++
+				count -= n
+			}
+		} else {
+			for count >= 11 {
+				n := 138
+				if n > count {
+					n = count
+				}
+				codegen[outIndex] = 18
+				outIndex++
+				codegen[outIndex] = uint8(n - 11)
+				outIndex++
+				w.codegenFreq[18]++
+				count -= n
+			}
+			if count >= 3 {
+				// count >= 3 && count <= 10
+				codegen[outIndex] = 17
+				outIndex++
+				codegen[outIndex] = uint8(count - 3)
+				outIndex++
+				w.codegenFreq[17]++
+				count = 0
+			}
+		}
+		count--
+		for ; count >= 0; count-- {
+			codegen[outIndex] = size
+			outIndex++
+			w.codegenFreq[size]++
+		}
+		// Set up invariant for next time through the loop.
+		size = nextSize
+		count = 1
+	}
+	// Marker indicating the end of the codegen.
+	codegen[outIndex] = badCode
+}
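+
+// Worked example (illustrative, not part of the original source) of the run-length
+// encoding produced above:
+//   - 7 repeats of length 8 emit 8, then code 16 with extra value 3 (repeat 6 more times)
+//   - 10 zero lengths emit code 17 with extra value 7 (10 - 3)
+//   - 25 zero lengths emit code 18 with extra value 14 (25 - 11)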
+
+func (w *huffmanBitWriter) codegens() int {
+	numCodegens := len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
+	numCodegens = len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	return 3 + 5 + 5 + 4 + (3 * numCodegens) +
+		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+		int(w.codegenFreq[16])*2 +
+		int(w.codegenFreq[17])*3 +
+		int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+	size = litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:])
+	return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+	header, numCodegens := w.headerSize()
+	size = header +
+		litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:]) +
+		extraBits
+	return size, numCodegens
+}
+
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+	total := 0
+	for i, n := range w.literalFreq[257:literalCount] {
+		total += int(n) * int(lengthExtraBits[i&31])
+	}
+	for i, n := range w.offsetFreq[:offsetCodeCount] {
+		total += int(n) * int(offsetExtraBits[i&31])
+	}
+	return total
+}
+
+// fixedSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+	return 3 +
+		fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+		fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
+		extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the block
+// fits inside a single block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+	if in == nil {
+		return 0, false
+	}
+	if len(in) <= maxStoreBlockSize {
+		return (len(in) + 5) * 8, true
+	}
+	return 0, false
+}
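+
+// For example (illustrative), a 1024-byte stored block costs (1024+5)*8 = 8232 bits,
+// the extra 5 bytes covering the stored-block header (type bits, LEN and NLEN).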
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+	// The function does not get inlined if we "& 63" the shift.
+	w.bits |= c.code64() << (w.nbits & 63)
+	w.nbits += c.len()
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+}
+
+// writeOutBits will write bits to the buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+	bits := w.bits
+	w.bits >>= 48
+	w.nbits -= 48
+	n := w.nbytes
+
+	// We over-write, but faster...
+	le.Store64(w.bytes[n:], bits)
+	n += 6
+
+	if n >= bufferFlushSize {
+		if w.err != nil {
+			n = 0
+			return
+		}
+		w.write(w.bytes[:n])
+		n = 0
+	}
+
+	w.nbytes = n
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+//	numLiterals  The number of literals specified in codegen
+//	numOffsets   The number of offsets specified in codegen
+//	numCodegens  The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+	if w.err != nil {
+		return
+	}
+	var firstBits int32 = 4
+	if isEof {
+		firstBits = 5
+	}
+	w.writeBits(firstBits, 3)
+	w.writeBits(int32(numLiterals-257), 5)
+	w.writeBits(int32(numOffsets-1), 5)
+	w.writeBits(int32(numCodegens-4), 4)
+
+	for i := 0; i < numCodegens; i++ {
+		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
+		w.writeBits(int32(value), 3)
+	}
+
+	i := 0
+	for {
+		var codeWord = uint32(w.codegen[i])
+		i++
+		if codeWord == badCode {
+			break
+		}
+		w.writeCode(w.codegenEncoding.codes[codeWord])
+
+		switch codeWord {
+		case 16:
+			w.writeBits(int32(w.codegen[i]), 2)
+			i++
+		case 17:
+			w.writeBits(int32(w.codegen[i]), 3)
+			i++
+		case 18:
+			w.writeBits(int32(w.codegen[i]), 7)
+			i++
+		}
+	}
+}
+
+// writeStoredHeader will write a stored header.
+// If the stored block is only used for EOF,
+// it is replaced with a fixed huffman block.
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+	if w.err != nil {
+		return
+	}
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+
+	// To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
+	if length == 0 && isEof {
+		w.writeFixedHeader(isEof)
+		// EOB: 7 bits, value: 0
+		w.writeBits(0, 7)
+		w.flush()
+		return
+	}
+
+	var flag int32
+	if isEof {
+		flag = 1
+	}
+	w.writeBits(flag, 3)
+	w.flush()
+	w.writeBits(int32(length), 16)
+	w.writeBits(int32(^uint16(length)), 16)
+}
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+	if w.err != nil {
+		return
+	}
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+
+	// Indicate that we are a fixed Huffman block
+	var value int32 = 2
+	if isEof {
+		value = 3
+	}
+	w.writeBits(value, 3)
+}
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
+	if w.err != nil {
+		return
+	}
+
+	tokens.AddEOB()
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+	numLiterals, numOffsets := w.indexTokens(tokens, false)
+	w.generate()
+	var extraBits int
+	storedSize, storable := w.storedSize(input)
+	if storable {
+		extraBits = w.extraBitSize()
+	}
+
+	// Figure out smallest code.
+	// Fixed Huffman baseline.
+	var literalEncoding = fixedLiteralEncoding
+	var offsetEncoding = fixedOffsetEncoding
+	var size = math.MaxInt32
+	if tokens.n < maxPredefinedTokens {
+		size = w.fixedSize(extraBits)
+	}
+
+	// Dynamic Huffman?
+	var numCodegens int
+
+	// Generate codegen and codegenFrequencies, which indicates how to encode
+	// the literalEncoding and the offsetEncoding.
+	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+	w.codegenEncoding.generate(w.codegenFreq[:], 7)
+	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+	if dynamicSize < size {
+		size = dynamicSize
+		literalEncoding = w.literalEncoding
+		offsetEncoding = w.offsetEncoding
+	}
+
+	// Stored bytes?
+	if storable && storedSize <= size {
+		w.writeStoredHeader(len(input), eof)
+		w.writeBytes(input)
+		return
+	}
+
+	// Huffman.
+	if literalEncoding == fixedLiteralEncoding {
+		w.writeFixedHeader(eof)
+	} else {
+		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+	}
+
+	// Write the tokens.
+	w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
+}
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbols used have a disproportionate
+// histogram distribution.
+// If input is supplied and the compression savings are below 1/16th of the
+// input size the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
+	if w.err != nil {
+		return
+	}
+
+	sync = sync || eof
+	if sync {
+		tokens.AddEOB()
+	}
+
+	// We cannot reuse a pure Huffman table, and must write the end-of-block marker.
+	if (w.lastHuffMan || eof) && w.lastHeader > 0 {
+		// We will not try to reuse.
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+		w.lastHuffMan = false
+	}
+
+	// fillReuse enables filling of empty values.
+	// This will make encodings always reusable without testing.
+	// However, this does not appear to be beneficial in most cases.
+	const fillReuse = false
+
+	// Check if we can reuse...
+	if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+
+	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+	extraBits := 0
+	ssize, storable := w.storedSize(input)
+
+	const usePrefs = true
+	if storable || w.lastHeader > 0 {
+		extraBits = w.extraBitSize()
+	}
+
+	var size int
+
+	// Check if we should reuse.
+	if w.lastHeader > 0 {
+		// Estimate size for using a new table.
+		// Use the previous header size as the best estimate.
+		newSize := w.lastHeader + tokens.EstimatedBits()
+		newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
+
+		// The estimated size is calculated as an optimal table.
+		// We add a penalty to make it more realistic and re-use a bit more.
+		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
+
+		// Check if a new table is better.
+		if newSize < reuseSize {
+			// Write the EOB we owe.
+			w.writeCode(w.literalEncoding.codes[endBlockMarker])
+			size = newSize
+			w.lastHeader = 0
+		} else {
+			size = reuseSize
+		}
+
+		if tokens.n < maxPredefinedTokens {
+			if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+				// Check if we get a reasonable size decrease.
+				if storable && ssize <= size {
+					w.writeStoredHeader(len(input), eof)
+					w.writeBytes(input)
+					return
+				}
+				w.writeFixedHeader(eof)
+				if !sync {
+					tokens.AddEOB()
+				}
+				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+				return
+			}
+		}
+		// Check if we get a reasonable size decrease.
+		if storable && ssize <= size {
+			w.writeStoredHeader(len(input), eof)
+			w.writeBytes(input)
+			return
+		}
+	}
+
+	// We want a new block/table
+	if w.lastHeader == 0 {
+		if fillReuse && !sync {
+			w.fillTokens()
+			numLiterals, numOffsets = maxNumLit, maxNumDist
+		} else {
+			w.literalFreq[endBlockMarker] = 1
+		}
+
+		w.generate()
+		// Generate codegen and codegenFrequencies, which indicates how to encode
+		// the literalEncoding and the offsetEncoding.
+		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+		w.codegenEncoding.generate(w.codegenFreq[:], 7)
+
+		var numCodegens int
+		if fillReuse && !sync {
+			// Reindex for accurate size...
+			w.indexTokens(tokens, true)
+		}
+		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+		// Store predefined, if we don't get a reasonable improvement.
+		if tokens.n < maxPredefinedTokens {
+			if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+				// Store bytes, if we don't get an improvement.
+				if storable && ssize <= preSize {
+					w.writeStoredHeader(len(input), eof)
+					w.writeBytes(input)
+					return
+				}
+				w.writeFixedHeader(eof)
+				if !sync {
+					tokens.AddEOB()
+				}
+				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+				return
+			}
+		}
+
+		if storable && ssize <= size {
+			// Store bytes, if we don't get an improvement.
+			w.writeStoredHeader(len(input), eof)
+			w.writeBytes(input)
+			return
+		}
+
+		// Write Huffman table.
+		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+		if !sync {
+			w.lastHeader, _ = w.headerSize()
+		}
+		w.lastHuffMan = false
+	}
+
+	if sync {
+		w.lastHeader = 0
+	}
+	// Write the tokens.
+	w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+func (w *huffmanBitWriter) fillTokens() {
+	for i, v := range w.literalFreq[:literalCount] {
+		if v == 0 {
+			w.literalFreq[i] = 1
+		}
+	}
+	for i, v := range w.offsetFreq[:offsetCodeCount] {
+		if v == 0 {
+			w.offsetFreq[i] = 1
+		}
+	}
+}
+
+// indexTokens indexes a slice of tokens, and updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+	//copy(w.literalFreq[:], t.litHist[:])
+	*(*[256]uint16)(w.literalFreq[:]) = t.litHist
+	//copy(w.literalFreq[256:], t.extraHist[:])
+	*(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
+	w.offsetFreq = t.offHist
+
+	if t.n == 0 {
+		return
+	}
+	if filled {
+		return maxNumLit, maxNumDist
+	}
+	// get the number of literals
+	numLiterals = len(w.literalFreq)
+	for w.literalFreq[numLiterals-1] == 0 {
+		numLiterals--
+	}
+	// get the number of offsets
+	numOffsets = len(w.offsetFreq)
+	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+		numOffsets--
+	}
+	if numOffsets == 0 {
+		// We haven't found a single match. If we want to go with the dynamic encoding,
+		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
+		w.offsetFreq[0] = 1
+		numOffsets = 1
+	}
+	return
+}
+
+func (w *huffmanBitWriter) generate() {
+	w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
+	w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeTokens writes a slice of tokens to the output.
+// codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+	if w.err != nil {
+		return
+	}
+	if len(tokens) == 0 {
+		return
+	}
+
+	// Only last token should be endBlockMarker.
+	var deferEOB bool
+	if tokens[len(tokens)-1] == endBlockMarker {
+		tokens = tokens[:len(tokens)-1]
+		deferEOB = true
+	}
+
+	// Create slices up to the next power of two to avoid bounds checks.
+	lits := leCodes[:256]
+	offs := oeCodes[:32]
+	lengths := leCodes[lengthCodesStart:]
+	lengths = lengths[:32]
+
+	// Go 1.16 LOVES having these on stack.
+	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+	for _, t := range tokens {
+		if t < 256 {
+			//w.writeCode(lits[t.literal()])
+			c := lits[t]
+			bits |= c.code64() << (nbits & 63)
+			nbits += c.len()
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+			continue
+		}
+
+		// Write the length
+		length := t.length()
+		lengthCode := lengthCode(length) & 31
+		if false {
+			w.writeCode(lengths[lengthCode])
+		} else {
+			// inlined
+			c := lengths[lengthCode]
+			bits |= c.code64() << (nbits & 63)
+			nbits += c.len()
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+		}
+
+		if lengthCode >= lengthExtraBitsMinCode {
+			extraLengthBits := lengthExtraBits[lengthCode]
+			//w.writeBits(extraLength, extraLengthBits)
+			extraLength := int32(length - lengthBase[lengthCode])
+			bits |= uint64(extraLength) << (nbits & 63)
+			nbits += extraLengthBits
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+		}
+		// Write the offset
+		offset := t.offset()
+		offsetCode := (offset >> 16) & 31
+		if false {
+			w.writeCode(offs[offsetCode])
+		} else {
+			// inlined
+			c := offs[offsetCode]
+			bits |= c.code64() << (nbits & 63)
+			nbits += c.len()
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+		}
+
+		if offsetCode >= offsetExtraBitsMinCode {
+			offsetComb := offsetCombined[offsetCode]
+			//w.writeBits(extraOffset, extraOffsetBits)
+			bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+			nbits += uint8(offsetComb)
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+		}
+	}
+	// Restore...
+	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+	if deferEOB {
+		w.writeCode(leCodes[endBlockMarker])
+	}
+}
+
+// huffOffset is a static offset encoder used for huffman only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+	w := newHuffmanBitWriter(nil)
+	w.offsetFreq[0] = 1
+	huffOffset = newHuffmanEncoder(offsetCodeCount)
+	huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// result gains only very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
+	if w.err != nil {
+		return
+	}
+
+	// Clear histogram
+	for i := range w.literalFreq[:] {
+		w.literalFreq[i] = 0
+	}
+	if !w.lastHuffMan {
+		for i := range w.offsetFreq[:] {
+			w.offsetFreq[i] = 0
+		}
+	}
+
+	const numLiterals = endBlockMarker + 1
+	const numOffsets = 1
+
+	// Add everything as literals
+	// We have to estimate the header size.
+	// Assume header is around 70 bytes:
+	// https://stackoverflow.com/a/25454430
+	const guessHeaderSizeBits = 70 * 8
+	histogram(input, w.literalFreq[:numLiterals])
+	ssize, storable := w.storedSize(input)
+	if storable && len(input) > 1024 {
+		// Quick check for incompressible content.
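+		// The loop below sums squared deviations of the byte histogram from a flat
+		// distribution; if the sum stays below 2*len(input) the data looks close to
+		// random, and the block is emitted as stored (uncompressed) instead.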
+		abs := float64(0)
+		avg := float64(len(input)) / 256
+		max := float64(len(input) * 2)
+		for _, v := range w.literalFreq[:256] {
+			diff := float64(v) - avg
+			abs += diff * diff
+			if abs > max {
+				break
+			}
+		}
+		if abs < max {
+			if debugDeflate {
+				fmt.Println("stored", abs, "<", max)
+			}
+			// No chance we can compress this...
+			w.writeStoredHeader(len(input), eof)
+			w.writeBytes(input)
+			return
+		}
+	}
+	w.literalFreq[endBlockMarker] = 1
+	w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
+	estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
+	if estBits < math.MaxInt32 {
+		estBits += w.lastHeader
+		if w.lastHeader == 0 {
+			estBits += guessHeaderSizeBits
+		}
+		estBits += estBits >> w.logNewTablePenalty
+	}
+
+	// Store bytes, if we don't get a reasonable improvement.
+	if storable && ssize <= estBits {
+		if debugDeflate {
+			fmt.Println("stored,", ssize, "<=", estBits)
+		}
+		w.writeStoredHeader(len(input), eof)
+		w.writeBytes(input)
+		return
+	}
+
+	if w.lastHeader > 0 {
+		reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])
+
+		if estBits < reuseSize {
+			if debugDeflate {
+				fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
+			}
+			// We owe an EOB
+			w.writeCode(w.literalEncoding.codes[endBlockMarker])
+			w.lastHeader = 0
+		} else if debugDeflate {
+			fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+		}
+	}
+
+	count := 0
+	if w.lastHeader == 0 {
+		// Use the temp encoding, so swap.
+		w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
+		// Generate codegen and codegenFrequencies, which indicates how to encode
+		// the literalEncoding and the offsetEncoding.
+		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+		w.codegenEncoding.generate(w.codegenFreq[:], 7)
+		numCodegens := w.codegens()
+
+		// Huffman.
+		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+		w.lastHuffMan = true
+		w.lastHeader, _ = w.headerSize()
+		if debugDeflate {
+			count += w.lastHeader
+			fmt.Println("header:", count/8)
+		}
+	}
+
+	encoding := w.literalEncoding.codes[:256]
+	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
+	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+	if debugDeflate {
+		count -= int(nbytes)*8 + int(nbits)
+	}
+	// Unroll, write 3 codes/loop.
+	// Fastest number of unrolls.
+	for len(input) > 3 {
+		// We must have at least 48 bits free.
+		if nbits >= 8 {
+			n := nbits >> 3
+			le.Store64(w.bytes[nbytes:], bits)
+			bits >>= (n * 8) & 63
+			nbits -= n * 8
+			nbytes += n
+		}
+		if nbytes >= bufferFlushSize {
+			if w.err != nil {
+				nbytes = 0
+				return
+			}
+			if debugDeflate {
+				count += int(nbytes) * 8
+			}
+			_, w.err = w.writer.Write(w.bytes[:nbytes])
+			nbytes = 0
+		}
+		a, b := encoding[input[0]], encoding[input[1]]
+		bits |= a.code64() << (nbits & 63)
+		bits |= b.code64() << ((nbits + a.len()) & 63)
+		c := encoding[input[2]]
+		nbits += b.len() + a.len()
+		bits |= c.code64() << (nbits & 63)
+		nbits += c.len()
+		input = input[3:]
+	}
+
+	// Remaining...
+	for _, t := range input {
+		if nbits >= 48 {
+			le.Store64(w.bytes[nbytes:], bits)
+			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+			bits >>= 48
+			nbits -= 48
+			nbytes += 6
+			if nbytes >= bufferFlushSize {
+				if w.err != nil {
+					nbytes = 0
+					return
+				}
+				if debugDeflate {
+					count += int(nbytes) * 8
+				}
+				_, w.err = w.writer.Write(w.bytes[:nbytes])
+				nbytes = 0
+			}
+		}
+		// Bitwriting inlined, ~30% speedup
+		c := encoding[t]
+		bits |= c.code64() << (nbits & 63)
+
+		nbits += c.len()
+		if debugDeflate {
+			count += int(c.len())
+		}
+	}
+	// Restore...
+	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+	if debugDeflate {
+		nb := count + int(nbytes)*8 + int(nbits)
+		fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
+	}
+	// Flush if needed to have space.
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+
+	if eof || sync {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+		w.lastHuffMan = false
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 0000000..be7b58b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math"
+	"math/bits"
+)
+
+const (
+	maxBitsLimit = 16
+	// number of valid literals
+	literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
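+// The low 8 bits hold the code length in bits; the remaining bits hold the code value
+// (already bit-reversed by the table builders, ready for LSB-first output).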
+type hcode uint32
+
+func (h hcode) len() uint8 {
+	return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+	return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+	return h == 0
+}
+
+type huffmanEncoder struct {
+	codes    []hcode
+	bitCount [17]int32
+
+	// Allocate a reusable buffer with the longest possible frequency table.
+	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+	// The largest of these is literalCount, so we allocate for that case.
+	freqcache [literalCount + 1]literalNode
+}
+
+type literalNode struct {
+	literal uint16
+	freq    uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level, kept here for better printing.
+	level int32
+
+	// The frequency of the last node at this level
+	lastFreq int32
+
+	// The frequency of the next character to add to this level
+	nextCharFreq int32
+
+	// The frequency of the next pair (from level below) to add to this level.
+	// Only valid if the "needed" value of the next lower level is 0.
+	nextPairFreq int32
+
+	// The number of chains remaining to generate for this level before moving
+	// up to the next level
+	needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+	*h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+	return hcode(length) | (hcode(code) << 8)
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	// Make capacity to next power of two.
+	c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(literalCount)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < literalCount; ch++ {
+		var bits uint16
+		var size uint8
+		switch {
+		case ch < 144:
+			// size 8, 00110000 .. 10111111
+			bits = ch + 48
+			size = 8
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch + 400 - 144
+			size = 9
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch + 192 - 280
+			size = 8
+		}
+		codes[ch] = newhcode(reverseBits(bits, size), size)
+	}
+	return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
+	}
+	return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			total += int(f) * int(h.codes[i].len())
+		}
+	}
+	return total
+}
+
+func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
+	var total int
+	for _, f := range b {
+		total += int(h.codes[f].len())
+	}
+	return total
+}
+
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			code := h.codes[i]
+			if code.zero() {
+				return math.MaxInt32
+			}
+			total += int(f) * int(code.len())
+		}
+	}
+	return total
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list  An array of the literals with non-zero frequencies
+//
+//	and their associated frequencies. The array is in order of increasing
+//	frequency, and has as its last element a special element with frequency
+//	MaxInt32
+//
+// maxBits     The maximum number of bits that should be used to encode any literal.
+//
+//	Must be less than 16.
+//
+// return      An integer array in which array[i] indicates the number of literals
+//
+//	that should be encoded in i bits.
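+//
+//	For example (illustrative): a result with array[2] == 1, array[3] == 2 and
+//	array[4] == 4 means one literal is encoded in 2 bits, two in 3 bits and four in 4 bits.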
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+	if maxBits >= maxBitsLimit {
+		panic("flate: maxBits too large")
+	}
+	n := int32(len(list))
+	list = list[0 : n+1]
+	list[n] = maxNode()
+
+	// The tree can't have greater depth than n - 1, no matter what. This
+	// saves a little bit of work in some small cases
+	if maxBits > n-1 {
+		maxBits = n - 1
+	}
+
+	// Create information about each of the levels.
+	// A bogus "Level 0" whose sole purpose is so that
+	// level1.prev.needed==0.  This makes level1.nextPairFreq
+	// be a legitimate value that never gets chosen.
+	var levels [maxBitsLimit]levelInfo
+	// leafCounts[i] counts the number of literals at the left
+	// of ancestors of the rightmost node at level i.
+	// leafCounts[i][j] is the number of literals at the left
+	// of the level j ancestor.
+	var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+	// Descending to only have 1 bounds check.
+	l2f := int32(list[2].freq)
+	l1f := int32(list[1].freq)
+	l0f := int32(list[0].freq) + int32(list[1].freq)
+
+	for level := int32(1); level <= maxBits; level++ {
+		// For every level, the first two items are the first two characters.
+		// We initialize the levels as if we had already figured this out.
+		levels[level] = levelInfo{
+			level:        level,
+			lastFreq:     l1f,
+			nextCharFreq: l2f,
+			nextPairFreq: l0f,
+		}
+		leafCounts[level][level] = 2
+		if level == 1 {
+			levels[level].nextPairFreq = math.MaxInt32
+		}
+	}
+
+	// We need a total of 2*n - 2 items at top level and have already generated 2.
+	levels[maxBits].needed = 2*n - 4
+
+	level := uint32(maxBits)
+	for level < 16 {
+		l := &levels[level]
+		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+			// We've run out of both leafs and pairs.
+			// End all calculations for this level.
+			// To make sure we never come back to this level or any lower level,
+			// set nextPairFreq impossibly large.
+			l.needed = 0
+			levels[level+1].nextPairFreq = math.MaxInt32
+			level++
+			continue
+		}
+
+		prevFreq := l.lastFreq
+		if l.nextCharFreq < l.nextPairFreq {
+			// The next item on this row is a leaf node.
+			n := leafCounts[level][level] + 1
+			l.lastFreq = l.nextCharFreq
+			// Lower leafCounts are the same as the previous node.
+			leafCounts[level][level] = n
+			e := list[n]
+			if e.literal < math.MaxUint16 {
+				l.nextCharFreq = int32(e.freq)
+			} else {
+				l.nextCharFreq = math.MaxInt32
+			}
+		} else {
+			// The next item on this row is a pair from the previous row.
+			// nextPairFreq isn't valid until we generate two
+			// more values in the level below
+			l.lastFreq = l.nextPairFreq
+			// Take leaf counts from the lower level, except counts[level] remains the same.
+			if true {
+				save := leafCounts[level][level]
+				leafCounts[level] = leafCounts[level-1]
+				leafCounts[level][level] = save
+			} else {
+				copy(leafCounts[level][:level], leafCounts[level-1][:level])
+			}
+			levels[l.level-1].needed = 2
+		}
+
+		if l.needed--; l.needed == 0 {
+			// We've done everything we need to do for this level.
+			// Continue calculating one level up. Fill in nextPairFreq
+			// of that level with the sum of the two nodes we've just calculated on
+			// this level.
+			if l.level == maxBits {
+				// All done!
+				break
+			}
+			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+			level++
+		} else {
+			// If we stole from below, move down temporarily to replenish it.
+			for levels[level-1].needed > 0 {
+				level--
+			}
+		}
+	}
+
+	// Something is wrong if, at the end, the top level is null or hasn't used
+	// all of the leaves.
+	if leafCounts[maxBits][maxBits] != n {
+		panic("leafCounts[maxBits][maxBits] != n")
+	}
+
+	bitCount := h.bitCount[:maxBits+1]
+	bits := 1
+	counts := &leafCounts[maxBits]
+	for level := maxBits; level > 0; level-- {
+		// chain.leafCount gives the number of literals requiring at least "bits"
+		// bits to encode.
+		bitCount[bits] = counts[level] - counts[level-1]
+		bits++
+	}
+	return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+	code := uint16(0)
+	for n, bits := range bitCount {
+		code <<= 1
+		if n == 0 || bits == 0 {
+			continue
+		}
+		// The literals list[len(list)-bits] .. list[len(list)-1]
+		// are encoded using "bits" bits, and get the values
+		// code, code + 1, ....  The code values are
+		// assigned in literal order (not frequency order).
+		chunk := list[len(list)-int(bits):]
+
+		sortByLiteral(chunk)
+		for _, node := range chunk {
+			h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
+			code++
+		}
+		list = list[0 : len(list)-int(bits)]
+	}
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits  The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
+	list := h.freqcache[:len(freq)+1]
+	codes := h.codes[:len(freq)]
+	// Number of non-zero literals
+	count := 0
+	// Set list to be the set of all non-zero literals and their frequencies
+	for i, f := range freq {
+		if f != 0 {
+			list[count] = literalNode{uint16(i), f}
+			count++
+		} else {
+			codes[i] = 0
+		}
+	}
+	list[count] = literalNode{}
+
+	list = list[:count]
+	if count <= 2 {
+		// Handle the small cases here, because they are awkward for the general case code. With
+		// two or fewer literals, everything has bit length 1.
+		for i, node := range list {
+			// "list" is in order of increasing literal value.
+			h.codes[node.literal].set(uint16(i), 1)
+		}
+		return
+	}
+	sortByFreq(list)
+
+	// Get the number of literals for each bit count
+	bitCount := h.bitCounts(list, maxBits)
+	// And do the assignment
+	h.assignEncodingAndSize(bitCount, list)
+}
+
+// atLeastOne clamps the result between 1 and 15.
+func atLeastOne(v float32) float32 {
+	if v < 1 {
+		return 1
+	}
+	if v > 15 {
+		return 15
+	}
+	return v
+}
+
+func histogram(b []byte, h []uint16) {
+	if true && len(b) >= 8<<10 {
+		// Split for bigger inputs
+		histogramSplit(b, h)
+	} else {
+		h = h[:256]
+		for _, t := range b {
+			h[t]++
+		}
+	}
+}
+
+func histogramSplit(b []byte, h []uint16) {
+	// Tested, and slightly faster than 2-way.
+	// Writing to separate arrays and combining is also slightly slower.
+	h = h[:256]
+	for len(b)&3 != 0 {
+		h[b[0]]++
+		b = b[1:]
+	}
+	n := len(b) / 4
+	x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
+	y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
+	for i, t := range x {
+		v0 := &h[t]
+		v1 := &h[y[i]]
+		v3 := &h[w[i]]
+		v2 := &h[z[i]]
+		*v0++
+		*v1++
+		*v2++
+		*v3++
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
new file mode 100644
index 0000000..6c05ba8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// sortByFreq sorts data by frequency, breaking ties by literal value.
+// It runs in O(n*log(n)) time. The sort is not guaranteed to be stable.
+func sortByFreq(data []literalNode) {
+	n := len(data)
+	quickSortByFreq(data, 0, n, maxDepth(n))
+}
+
+func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
+	for b-a > 12 { // Use ShellSort for slices <= 12 elements
+		if maxDepth == 0 {
+			heapSort(data, a, b)
+			return
+		}
+		maxDepth--
+		mlo, mhi := doPivotByFreq(data, a, b)
+		// Avoiding recursion on the larger subproblem guarantees
+		// a stack depth of at most lg(b-a).
+		if mlo-a < b-mhi {
+			quickSortByFreq(data, a, mlo, maxDepth)
+			a = mhi // i.e., quickSortByFreq(data, mhi, b)
+		} else {
+			quickSortByFreq(data, mhi, b, maxDepth)
+			b = mlo // i.e., quickSortByFreq(data, a, mlo)
+		}
+	}
+	if b-a > 1 {
+		// Do ShellSort pass with gap 6
+		// It could be written in this simplified form because b-a <= 12
+		for i := a + 6; i < b; i++ {
+			if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
+				data[i], data[i-6] = data[i-6], data[i]
+			}
+		}
+		insertionSortByFreq(data, a, b)
+	}
+}
+
+func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
+	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+	if hi-lo > 40 {
+		// Tukey's ``Ninther,'' median of three medians of three.
+		s := (hi - lo) / 8
+		medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
+		medianOfThreeSortByFreq(data, m, m-s, m+s)
+		medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
+	}
+	medianOfThreeSortByFreq(data, lo, m, hi-1)
+
+	// Invariants are:
+	//	data[lo] = pivot (set up by ChoosePivot)
+	//	data[lo < i < a] < pivot
+	//	data[a <= i < b] <= pivot
+	//	data[b <= i < c] unexamined
+	//	data[c <= i < hi-1] > pivot
+	//	data[hi-1] >= pivot
+	pivot := lo
+	a, c := lo+1, hi-1
+
+	for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
+	}
+	b := a
+	for {
+		for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
+		}
+		for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
+		}
+		if b >= c {
+			break
+		}
+		// data[b] > pivot; data[c-1] <= pivot
+		data[b], data[c-1] = data[c-1], data[b]
+		b++
+		c--
+	}
+	// If hi-c<3 then there are duplicates (by property of median of nine).
+	// Let's be a bit more conservative, and set border to 5.
+	protect := hi-c < 5
+	if !protect && hi-c < (hi-lo)/4 {
+		// Let's test some points for equality to pivot
+		dups := 0
+		if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
+			data[c], data[hi-1] = data[hi-1], data[c]
+			c++
+			dups++
+		}
+		if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
+			b--
+			dups++
+		}
+		// m-lo = (hi-lo)/2 > 6
+		// b-lo > (hi-lo)*3/4-1 > 8
+		// ==> m < b ==> data[m] <= pivot
+		if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
+			data[m], data[b-1] = data[b-1], data[m]
+			b--
+			dups++
+		}
+		// if at least 2 points are equal to pivot, assume skewed distribution
+		protect = dups > 1
+	}
+	if protect {
+		// Protect against a lot of duplicates
+		// Add invariant:
+		//	data[a <= i < b] unexamined
+		//	data[b <= i < c] = pivot
+		for {
+			for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
+			}
+			for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
+			}
+			if a >= b {
+				break
+			}
+			// data[a] == pivot; data[b-1] < pivot
+			data[a], data[b-1] = data[b-1], data[a]
+			a++
+			b--
+		}
+	}
+	// Swap pivot into middle
+	data[pivot], data[b-1] = data[b-1], data[pivot]
+	return b - 1, c
+}
+
+// Insertion sort
+func insertionSortByFreq(data []literalNode, a, b int) {
+	for i := a + 1; i < b; i++ {
+		for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
+			data[j], data[j-1] = data[j-1], data[j]
+		}
+	}
+}
+
+// quickSortByFreq, loosely following Bentley and McIlroy,
+// ``Engineering a Sort Function,'' SP&E November 1993.
+
+// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
+	// sort 3 elements
+	if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+		data[m1], data[m0] = data[m0], data[m1]
+	}
+	// data[m0] <= data[m1]
+	if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
+		data[m2], data[m1] = data[m1], data[m2]
+		// data[m0] <= data[m2] && data[m1] < data[m2]
+		if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+			data[m1], data[m0] = data[m0], data[m1]
+		}
+	}
+	// now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
new file mode 100644
index 0000000..93f1aea
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
@@ -0,0 +1,201 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// sortByLiteral sorts data by literal value.
+// It runs in O(n*log(n)) time. The sort is not guaranteed to be stable.
+func sortByLiteral(data []literalNode) {
+	n := len(data)
+	quickSort(data, 0, n, maxDepth(n))
+}
+
+func quickSort(data []literalNode, a, b, maxDepth int) {
+	for b-a > 12 { // Use ShellSort for slices <= 12 elements
+		if maxDepth == 0 {
+			heapSort(data, a, b)
+			return
+		}
+		maxDepth--
+		mlo, mhi := doPivot(data, a, b)
+		// Avoiding recursion on the larger subproblem guarantees
+		// a stack depth of at most lg(b-a).
+		if mlo-a < b-mhi {
+			quickSort(data, a, mlo, maxDepth)
+			a = mhi // i.e., quickSort(data, mhi, b)
+		} else {
+			quickSort(data, mhi, b, maxDepth)
+			b = mlo // i.e., quickSort(data, a, mlo)
+		}
+	}
+	if b-a > 1 {
+		// Do ShellSort pass with gap 6
+		// It could be written in this simplified form because b-a <= 12
+		for i := a + 6; i < b; i++ {
+			if data[i].literal < data[i-6].literal {
+				data[i], data[i-6] = data[i-6], data[i]
+			}
+		}
+		insertionSort(data, a, b)
+	}
+}
+func heapSort(data []literalNode, a, b int) {
+	first := a
+	lo := 0
+	hi := b - a
+
+	// Build heap with greatest element at top.
+	for i := (hi - 1) / 2; i >= 0; i-- {
+		siftDown(data, i, hi, first)
+	}
+
+	// Pop elements, largest first, into end of data.
+	for i := hi - 1; i >= 0; i-- {
+		data[first], data[first+i] = data[first+i], data[first]
+		siftDown(data, lo, i, first)
+	}
+}
+
+// siftDown implements the heap property on data[lo, hi).
+// first is an offset into the array where the root of the heap lies.
+func siftDown(data []literalNode, lo, hi, first int) {
+	root := lo
+	for {
+		child := 2*root + 1
+		if child >= hi {
+			break
+		}
+		if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
+			child++
+		}
+		if data[first+root].literal > data[first+child].literal {
+			return
+		}
+		data[first+root], data[first+child] = data[first+child], data[first+root]
+		root = child
+	}
+}
+func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
+	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+	if hi-lo > 40 {
+		// Tukey's ``Ninther,'' median of three medians of three.
+		s := (hi - lo) / 8
+		medianOfThree(data, lo, lo+s, lo+2*s)
+		medianOfThree(data, m, m-s, m+s)
+		medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
+	}
+	medianOfThree(data, lo, m, hi-1)
+
+	// Invariants are:
+	//	data[lo] = pivot (set up by ChoosePivot)
+	//	data[lo < i < a] < pivot
+	//	data[a <= i < b] <= pivot
+	//	data[b <= i < c] unexamined
+	//	data[c <= i < hi-1] > pivot
+	//	data[hi-1] >= pivot
+	pivot := lo
+	a, c := lo+1, hi-1
+
+	for ; a < c && data[a].literal < data[pivot].literal; a++ {
+	}
+	b := a
+	for {
+		for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
+		}
+		for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
+		}
+		if b >= c {
+			break
+		}
+		// data[b] > pivot; data[c-1] <= pivot
+		data[b], data[c-1] = data[c-1], data[b]
+		b++
+		c--
+	}
+	// If hi-c<3 then there are duplicates (by property of median of nine).
+	// Let's be a bit more conservative, and set border to 5.
+	protect := hi-c < 5
+	if !protect && hi-c < (hi-lo)/4 {
+		// Let's test some points for equality to pivot
+		dups := 0
+		if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
+			data[c], data[hi-1] = data[hi-1], data[c]
+			c++
+			dups++
+		}
+		if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
+			b--
+			dups++
+		}
+		// m-lo = (hi-lo)/2 > 6
+		// b-lo > (hi-lo)*3/4-1 > 8
+		// ==> m < b ==> data[m] <= pivot
+		if data[m].literal > data[pivot].literal { // data[m] = pivot
+			data[m], data[b-1] = data[b-1], data[m]
+			b--
+			dups++
+		}
+		// if at least 2 points are equal to pivot, assume skewed distribution
+		protect = dups > 1
+	}
+	if protect {
+		// Protect against a lot of duplicates
+		// Add invariant:
+		//	data[a <= i < b] unexamined
+		//	data[b <= i < c] = pivot
+		for {
+			for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
+			}
+			for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
+			}
+			if a >= b {
+				break
+			}
+			// data[a] == pivot; data[b-1] < pivot
+			data[a], data[b-1] = data[b-1], data[a]
+			a++
+			b--
+		}
+	}
+	// Swap pivot into middle
+	data[pivot], data[b-1] = data[b-1], data[pivot]
+	return b - 1, c
+}
+
+// Insertion sort
+func insertionSort(data []literalNode, a, b int) {
+	for i := a + 1; i < b; i++ {
+		for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
+			data[j], data[j-1] = data[j-1], data[j]
+		}
+	}
+}
+
+// maxDepth returns a threshold at which quicksort should switch
+// to heapsort. It returns 2*ceil(lg(n+1)).
+func maxDepth(n int) int {
+	var depth int
+	for i := n; i > 0; i >>= 1 {
+		depth++
+	}
+	return depth * 2
+}
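+
+// For example (illustrative), maxDepth(100) returns 14: 100 spans 7 bits, and 2*7 == 14.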
+
+// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree(data []literalNode, m1, m0, m2 int) {
+	// sort 3 elements
+	if data[m1].literal < data[m0].literal {
+		data[m1], data[m0] = data[m0], data[m1]
+	}
+	// data[m0] <= data[m1]
+	if data[m2].literal < data[m1].literal {
+		data[m2], data[m1] = data[m1], data[m2]
+		// data[m0] <= data[m2] && data[m1] < data[m2]
+		if data[m1].literal < data[m0].literal {
+			data[m1], data[m0] = data[m0], data[m1]
+		}
+	}
+	// now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 0000000..0d7b437
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,865 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951.  The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+	"bufio"
+	"compress/flate"
+	"fmt"
+	"io"
+	"math/bits"
+	"sync"
+)
+
+const (
+	maxCodeLen     = 16 // max length of Huffman code
+	maxCodeLenMask = 15 // mask for max length of Huffman code
+	// The next three numbers come from the RFC section 3.2.7, with the
+	// additional proviso in section 3.2.5 which implies that distance codes
+	// 30 and 31 should never occur in compressed data.
+	maxNumLit  = 286
+	maxNumDist = 30
+	numCodes   = 19 // number of codes in Huffman meta-code
+
+	debugDecode = false
+)
+
+// Value of length - 3 and extra bits.
+type lengthExtra struct {
+	length, extra uint8
+}
+
+var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
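+
+// For example (illustrative), decCodeToLen[8] = {length: 0x8, extra: 0x1}: length code
+// 265 (257+8) decodes to a base match length of 8+3 = 11 with one extra bit.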
+
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError = flate.CorruptInputError
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError = flate.ReadError
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError = flate.WriteError
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+	// Reset discards any buffered data and resets the Resetter as if it was
+	// newly initialized with the given reader.
+	Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+//
+// See the following:
+//	http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+	huffmanChunkBits  = 9
+	huffmanNumChunks  = 1 << huffmanChunkBits
+	huffmanCountMask  = 15
+	huffmanValueShift = 4
+)
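+
+// For example (purely illustrative): a symbol with value 42 and a code
+// length of 7 bits is stored as chunk = 42<<huffmanValueShift | 7 = 0x2a7.
+// Decoding that chunk consumes chunk&huffmanCountMask = 7 bits and yields
+// the value chunk>>huffmanValueShift = 42.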
+
+type huffmanDecoder struct {
+	maxRead  int                       // the maximum number of bits we can read and not overread
+	chunks   *[huffmanNumChunks]uint16 // chunks as described above
+	links    [][]uint16                // overflow links
+	linkMask uint32                    // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+	// Sanity enables additional runtime tests during Huffman
+	// table construction. It's intended to be used during
+	// development to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if h.chunks == nil {
+		h.chunks = new([huffmanNumChunks]uint16)
+	}
+
+	if h.maxRead != 0 {
+		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
+	}
+
+	// Count number of codes of each length,
+	// compute maxRead and max length.
+	var count [maxCodeLen]int
+	var min, max int
+	for _, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		if min == 0 || n < min {
+			min = n
+		}
+		if n > max {
+			max = n
+		}
+		count[n&maxCodeLenMask]++
+	}
+
+	// Empty tree. The decompressor.huffSym function will fail later if the tree
+	// is used. Technically, an empty tree is only valid for the HDIST tree and
+	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+	// is guaranteed to fail since it will attempt to use the tree to decode the
+	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+	// guaranteed to fail later since the compressed data section must be
+	// composed of at least one symbol (the end-of-block marker).
+	if max == 0 {
+		return true
+	}
+
+	code := 0
+	var nextcode [maxCodeLen]int
+	for i := min; i <= max; i++ {
+		code <<= 1
+		nextcode[i&maxCodeLenMask] = code
+		code += count[i&maxCodeLenMask]
+	}
+
+	// Check that the coding is complete (i.e., that we've
+	// assigned all 2-to-the-max possible bit sequences).
+	// Exception: To be compatible with zlib, we also need to
+	// accept degenerate single-code codings. See also
+	// TestDegenerateHuffmanCoding.
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		if debugDecode {
+			fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
+		}
+		return false
+	}
+
+	h.maxRead = min
+
+	chunks := h.chunks[:]
+	for i := range chunks {
+		chunks[i] = 0
+	}
+
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		if cap(h.links) < huffmanNumChunks-link {
+			h.links = make([][]uint16, huffmanNumChunks-link)
+		} else {
+			h.links = h.links[:huffmanNumChunks-link]
+		}
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(bits.Reverse16(uint16(j)))
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			if cap(h.links[off]) < numLinks {
+				h.links[off] = make([]uint16, numLinks)
+			} else {
+				h.links[off] = h.links[off][:numLinks]
+			}
+		}
+	} else {
+		h.links = h.links[:0]
+	}
+
+	for i, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint16(i<<huffmanValueShift | n)
+		reverse := int(bits.Reverse16(uint16(code)))
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
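+
+// To make the canonical code construction above concrete, a small
+// hand-worked example (not used by the code): for lengths = [2, 1, 3, 3]
+// the counts are count[1]=1, count[2]=1, count[3]=2, giving nextcode[1]=0,
+// nextcode[2]=2, nextcode[3]=6 and a final code value of 8 == 1<<max, so
+// the coding is complete. The assigned codes are symbol 1 -> 0 (1 bit),
+// symbol 0 -> 10 (2 bits), symbol 2 -> 110 and symbol 3 -> 111 (3 bits),
+// following the algorithm of RFC 1951 section 3.2.2.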
+
+// Reader is the actual read interface needed by NewReader.
+// If the passed-in io.Reader does not also have ReadByte,
+// NewReader will introduce its own buffering.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+type step uint8
+
+const (
+	copyData step = iota + 1
+	nextBlock
+	huffmanBytesBuffer
+	huffmanBytesReader
+	huffmanBufioReader
+	huffmanStringsReader
+	huffmanGenericReader
+)
+
+// flushMode tells the decompressor when to return data.
+type flushMode uint8
+
+const (
+	syncFlush    flushMode = iota // return data after sync flush block
+	partialFlush                  // return data after each block
+)
+
+// Decompress state.
+type decompressor struct {
+	// Input source.
+	r       Reader
+	roffset int64
+
+	// Huffman decoders for literal/length, distance.
+	h1, h2 huffmanDecoder
+
+	// Length arrays used to define Huffman codes.
+	bits     *[maxNumLit + maxNumDist]int
+	codebits *[numCodes]int
+
+	// Output history, buffer.
+	dict dictDecoder
+
+	// Next step in the decompression,
+	// and decompression state.
+	step      step
+	stepState int
+	err       error
+	toRead    []byte
+	hl, hd    *huffmanDecoder
+	copyLen   int
+	copyDist  int
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Input bits, in top of b.
+	b uint32
+
+	nb    uint
+	final bool
+
+	flushMode flushMode
+}
+
+func (f *decompressor) nextBlock() {
+	for f.nb < 1+2 {
+		if f.err = f.moreBits(); f.err != nil {
+			return
+		}
+	}
+	f.final = f.b&1 == 1
+	f.b >>= 1
+	typ := f.b & 3
+	f.b >>= 2
+	f.nb -= 1 + 2
+	switch typ {
+	case 0:
+		f.dataBlock()
+		if debugDecode {
+			fmt.Println("stored block")
+		}
+	case 1:
+		// compressed, fixed Huffman tables
+		f.hl = &fixedHuffmanDecoder
+		f.hd = nil
+		f.huffmanBlockDecoder()
+		if debugDecode {
+			fmt.Println("predefinied huffman block")
+		}
+	case 2:
+		// compressed, dynamic Huffman tables
+		if f.err = f.readHuffman(); f.err != nil {
+			break
+		}
+		f.hl = &f.h1
+		f.hd = &f.h2
+		f.huffmanBlockDecoder()
+		if debugDecode {
+			fmt.Println("dynamic huffman block")
+		}
+	default:
+		// 3 is reserved.
+		if debugDecode {
+			fmt.Println("reserved data block encountered")
+		}
+		f.err = CorruptInputError(f.roffset)
+	}
+}
+
+func (f *decompressor) Read(b []byte) (int, error) {
+	for {
+		if len(f.toRead) > 0 {
+			n := copy(b, f.toRead)
+			f.toRead = f.toRead[n:]
+			if len(f.toRead) == 0 {
+				return n, f.err
+			}
+			return n, nil
+		}
+		if f.err != nil {
+			return 0, f.err
+		}
+
+		f.doStep()
+
+		if f.err != nil && len(f.toRead) == 0 {
+			f.toRead = f.dict.readFlush() // Flush what's left in case of error
+		}
+	}
+}
+
+// WriteTo implements the io.WriterTo interface for io.Copy and friends.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+	total := int64(0)
+	flushed := false
+	for {
+		if len(f.toRead) > 0 {
+			n, err := w.Write(f.toRead)
+			total += int64(n)
+			if err != nil {
+				f.err = err
+				return total, err
+			}
+			if n != len(f.toRead) {
+				return total, io.ErrShortWrite
+			}
+			f.toRead = f.toRead[:0]
+		}
+		if f.err != nil && flushed {
+			if f.err == io.EOF {
+				return total, nil
+			}
+			return total, f.err
+		}
+		if f.err == nil {
+			f.doStep()
+		}
+		if len(f.toRead) == 0 && f.err != nil && !flushed {
+			f.toRead = f.dict.readFlush() // Flush what's left in case of error
+			flushed = true
+		}
+	}
+}
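+
+// Because the decompressor implements io.WriterTo, a plain
+//
+//	n, err := io.Copy(dst, NewReader(src))
+//
+// uses WriteTo above and avoids the intermediate buffer io.Copy would
+// otherwise allocate (illustrative usage; dst and src are assumed, error
+// handling omitted).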
+
+func (f *decompressor) Close() error {
+	if f.err == io.EOF {
+		return nil
+	}
+	return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+	// HLIT[5], HDIST[5], HCLEN[4].
+	for f.nb < 5+5+4 {
+		if err := f.moreBits(); err != nil {
+			return err
+		}
+	}
+	nlit := int(f.b&0x1F) + 257
+	if nlit > maxNumLit {
+		if debugDecode {
+			fmt.Println("nlit > maxNumLit", nlit)
+		}
+		return CorruptInputError(f.roffset)
+	}
+	f.b >>= 5
+	ndist := int(f.b&0x1F) + 1
+	if ndist > maxNumDist {
+		if debugDecode {
+			fmt.Println("ndist > maxNumDist", ndist)
+		}
+		return CorruptInputError(f.roffset)
+	}
+	f.b >>= 5
+	nclen := int(f.b&0xF) + 4
+	// numCodes is 19, so nclen is always valid.
+	f.b >>= 4
+	f.nb -= 5 + 5 + 4
+
+	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+	for i := 0; i < nclen; i++ {
+		for f.nb < 3 {
+			if err := f.moreBits(); err != nil {
+				return err
+			}
+		}
+		f.codebits[codeOrder[i]] = int(f.b & 0x7)
+		f.b >>= 3
+		f.nb -= 3
+	}
+	for i := nclen; i < len(codeOrder); i++ {
+		f.codebits[codeOrder[i]] = 0
+	}
+	if !f.h1.init(f.codebits[0:]) {
+		if debugDecode {
+			fmt.Println("init codebits failed")
+		}
+		return CorruptInputError(f.roffset)
+	}
+
+	// HLIT + 257 code lengths, HDIST + 1 code lengths,
+	// using the code length Huffman code.
+	for i, n := 0, nlit+ndist; i < n; {
+		x, err := f.huffSym(&f.h1)
+		if err != nil {
+			return err
+		}
+		if x < 16 {
+			// Actual length.
+			f.bits[i] = x
+			i++
+			continue
+		}
+		// Repeat previous length or zero.
+		var rep int
+		var nb uint
+		var b int
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				if debugDecode {
+					fmt.Println("i==0")
+				}
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				if debugDecode {
+					fmt.Println("morebits:", err)
+				}
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+		f.b >>= nb & regSizeMaskUint32
+		f.nb -= nb
+		if i+rep > n {
+			if debugDecode {
+				fmt.Println("i+rep > n", i, rep, n)
+			}
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		if debugDecode {
+			fmt.Println("init2 failed")
+		}
+		return CorruptInputError(f.roffset)
+	}
+
+	// As an optimization, we can initialize maxRead (the number of bits to
+	// read at a time) for the HLIT tree to the length of the EOB marker since we know that
+	// every block must terminate with one. This preserves the property that
+	// we never read any extra bytes after the end of the DEFLATE stream.
+	if f.h1.maxRead < f.bits[endBlockMarker] {
+		f.h1.maxRead = f.bits[endBlockMarker]
+	}
+	if !f.final {
+		// If not the final block, the smallest block possible is
+		// a predefined table, BTYPE=01, with a single EOB marker.
+		// This will take up 3 + 7 bits.
+		f.h1.maxRead += 10
+	}
+
+	return nil
+}
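+
+// A small illustration of the repeat codes handled above: meta-code symbol
+// 17 emits a zero length 3-10 times (3 plus a 3-bit extra value), so symbol
+// 17 followed by the extra bits 101 (value 5) produces eight zero code
+// lengths; symbol 16 copies the previous code length 3-6 times and symbol
+// 18 covers longer zero runs of 11-138.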
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+	// Uncompressed.
+	// Discard current half-byte.
+	left := (f.nb) & 7
+	f.nb -= left
+	f.b >>= left
+
+	offBytes := f.nb >> 3
+	// Unfilled values will be overwritten.
+	f.buf[0] = uint8(f.b)
+	f.buf[1] = uint8(f.b >> 8)
+	f.buf[2] = uint8(f.b >> 16)
+	f.buf[3] = uint8(f.b >> 24)
+
+	f.roffset += int64(offBytes)
+	f.nb, f.b = 0, 0
+
+	// Length then ones-complement of length.
+	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+	f.roffset += int64(nr)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+	if nn != ^n {
+		if debugDecode {
+			ncomp := ^n
+			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+		}
+		f.err = CorruptInputError(f.roffset)
+		return
+	}
+
+	if n == 0 {
+		if f.flushMode == syncFlush {
+			f.toRead = f.dict.readFlush()
+		}
+
+		f.finishBlock()
+		return
+	}
+
+	f.copyLen = int(n)
+	f.copyData()
+}
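+
+// For reference, the stored-block header read above is LEN followed by its
+// ones-complement NLEN, both 16-bit little-endian. A 5-byte stored block
+// therefore begins (after byte alignment) with the bytes 0x05 0x00 0xfa
+// 0xff, and the nn != ^n check catches any mismatch (illustrative example).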
+
+// copyData copies f.copyLen bytes from the underlying reader into f.dict.
+// It pauses for reads when f.dict is full.
+func (f *decompressor) copyData() {
+	buf := f.dict.writeSlice()
+	if len(buf) > f.copyLen {
+		buf = buf[:f.copyLen]
+	}
+
+	cnt, err := io.ReadFull(f.r, buf)
+	f.roffset += int64(cnt)
+	f.copyLen -= cnt
+	f.dict.writeMark(cnt)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+
+	if f.dict.availWrite() == 0 || f.copyLen > 0 {
+		f.toRead = f.dict.readFlush()
+		f.step = copyData
+		return
+	}
+	f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+	if f.final {
+		if f.dict.availRead() > 0 {
+			f.toRead = f.dict.readFlush()
+		}
+
+		f.err = io.EOF
+	} else if f.flushMode == partialFlush && f.dict.availRead() > 0 {
+		f.toRead = f.dict.readFlush()
+	}
+
+	f.step = nextBlock
+}
+
+func (f *decompressor) doStep() {
+	switch f.step {
+	case copyData:
+		f.copyData()
+	case nextBlock:
+		f.nextBlock()
+	case huffmanBytesBuffer:
+		f.huffmanBytesBuffer()
+	case huffmanBytesReader:
+		f.huffmanBytesReader()
+	case huffmanBufioReader:
+		f.huffmanBufioReader()
+	case huffmanStringsReader:
+		f.huffmanStringsReader()
+	case huffmanGenericReader:
+		f.huffmanGenericReader()
+	default:
+		panic("BUG: unexpected step state")
+	}
+}
+
+// noEOF returns e, unless e == io.EOF, in which case it returns io.ErrUnexpectedEOF.
+func noEOF(e error) error {
+	if e == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	return e
+}
+
+func (f *decompressor) moreBits() error {
+	c, err := f.r.ReadByte()
+	if err != nil {
+		return noEOF(err)
+	}
+	f.roffset++
+	f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
+	f.nb += 8
+	return nil
+}
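+
+// The bit buffer fills least-significant-bit first. As a quick sketch:
+// after reading the bytes 0xa5 and then 0x3c, f.b == 0x3ca5 and f.nb == 16;
+// consuming three bits with f.b&7 yields 5 (the low bits of 0xa5), after
+// which f.b >>= 3 and f.nb -= 3.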
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+	// with a single element, huffSym must error on these two edge cases. In both
+	// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+	// satisfy the n == 0 check below.
+	n := uint(h.maxRead)
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	nb, b := f.nb, f.b
+	for {
+		for nb < n {
+			c, err := f.r.ReadByte()
+			if err != nil {
+				f.b = b
+				f.nb = nb
+				return 0, noEOF(err)
+			}
+			f.roffset++
+			b |= uint32(c) << (nb & regSizeMaskUint32)
+			nb += 8
+		}
+		chunk := h.chunks[b&(huffmanNumChunks-1)]
+		n = uint(chunk & huffmanCountMask)
+		if n > huffmanChunkBits {
+			chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+			n = uint(chunk & huffmanCountMask)
+		}
+		if n <= nb {
+			if n == 0 {
+				f.b = b
+				f.nb = nb
+				if debugDecode {
+					fmt.Println("huffsym: n==0")
+				}
+				f.err = CorruptInputError(f.roffset)
+				return 0, f.err
+			}
+			f.b = b >> (n & regSizeMaskUint32)
+			f.nb = nb - n
+			return int(chunk >> huffmanValueShift), nil
+		}
+	}
+}
+
+func makeReader(r io.Reader) Reader {
+	if rr, ok := r.(Reader); ok {
+		return rr
+	}
+	return bufio.NewReader(r)
+}
+
+func fixedHuffmanDecoderInit() {
+	fixedOnce.Do(func() {
+		// These come from the RFC section 3.2.6.
+		var bits [288]int
+		for i := 0; i < 144; i++ {
+			bits[i] = 8
+		}
+		for i := 144; i < 256; i++ {
+			bits[i] = 9
+		}
+		for i := 256; i < 280; i++ {
+			bits[i] = 7
+		}
+		for i := 280; i < 288; i++ {
+			bits[i] = 8
+		}
+		fixedHuffmanDecoder.init(bits[:])
+	})
+}
+
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+	*f = decompressor{
+		r:        makeReader(r),
+		bits:     f.bits,
+		codebits: f.codebits,
+		h1:       f.h1,
+		h2:       f.h2,
+		dict:     f.dict,
+		step:     nextBlock,
+	}
+	f.dict.init(maxMatchOffset, dict)
+	return nil
+}
+
+type ReaderOpt func(*decompressor)
+
+// WithPartialBlock tells the decompressor to return after each block,
+// so it can read data written with partial flush.
+func WithPartialBlock() ReaderOpt {
+	return func(f *decompressor) {
+		f.flushMode = partialFlush
+	}
+}
+
+// WithDict initializes the reader with a preset dictionary.
+func WithDict(dict []byte) ReaderOpt {
+	return func(f *decompressor) {
+		f.dict.init(maxMatchOffset, dict)
+	}
+}
+
+// NewReaderOpts returns a new reader with the provided options.
+func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser {
+	fixedHuffmanDecoderInit()
+
+	var f decompressor
+	f.r = makeReader(r)
+	f.bits = new([maxNumLit + maxNumDist]int)
+	f.codebits = new([numCodes]int)
+	f.step = nextBlock
+	f.dict.init(maxMatchOffset, nil)
+
+	for _, opt := range opts {
+		opt(&f)
+	}
+
+	return &f
+}
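+
+// An illustrative call from client code (r, dict and dst are assumed;
+// error handling omitted):
+//
+//	zr := flate.NewReaderOpts(r, flate.WithPartialBlock(), flate.WithDict(dict))
+//	defer zr.Close()
+//	io.Copy(dst, zr)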
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+	return NewReaderOpts(r)
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary. The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read. NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+	return NewReaderOpts(r, WithDict(dict))
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
new file mode 100644
index 0000000..2b2f993
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -0,0 +1,1283 @@
+// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"math/bits"
+	"strings"
+)
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesBuffer() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bytes.Buffer)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanBytesBuffer
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesBuffer // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bytes.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanBytesReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBufioReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bufio.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanBufioReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBufioReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanStringsReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*strings.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanStringsReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanStringsReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanGenericReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanGenericReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() {
+	switch f.r.(type) {
+	case *bytes.Buffer:
+		f.huffmanBytesBuffer()
+	case *bytes.Reader:
+		f.huffmanBytesReader()
+	case *bufio.Reader:
+		f.huffmanBufioReader()
+	case *strings.Reader:
+		f.huffmanStringsReader()
+	case Reader:
+		f.huffmanGenericReader()
+	default:
+		f.huffmanGenericReader()
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 0000000..c3581a3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,215 @@
+package flate
+
+import (
+	"fmt"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+// fastEncL1 maintains the table for matches,
+// and the previous byte block for cross-block matching.
+// This is the generic implementation.
+type fastEncL1 struct {
+	fastGen
+	table [tableSize]tableEntry
+}
+
+// Encode encodes a block using the level 1 match-finding algorithm.
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashBytes              = 5
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+
+	for {
+		const skipLog = 5
+		const doEvery = 2
+
+		nextS := s
+		var candidate tableEntry
+		var t int32
+		for {
+			nextHash := hashLen(cv, tableBits, hashBytes)
+			candidate = e.table[nextHash]
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+
+			now := load6432(src, nextS)
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+			nextHash = hashLen(now, tableBits, hashBytes)
+			t = candidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+
+			// Do one right away...
+			cv = now
+			s = nextS
+			nextS++
+			candidate = e.table[nextHash]
+			now >>= 8
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+			t = candidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+			cv = now
+			s = nextS
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			// Save the match found
+			if false {
+				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			} else {
+				// Inlined...
+				xoffset := uint32(s - t - baseMatchOffset)
+				xlength := l
+				oc := offsetCode(xoffset)
+				xoffset |= oc << 16
+				for xlength > 0 {
+					xl := xlength
+					if xl > 258 {
+						if xl > 258+baseMatchLength {
+							xl = 258
+						} else {
+							xl = 258 - baseMatchLength
+						}
+					}
+					xlength -= xl
+					xl -= baseMatchLength
+					dst.extraHist[lengthCodes1[uint8(xl)]]++
+					dst.offHist[oc]++
+					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+					dst.n++
+				}
+			}
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+			if s >= sLimit {
+				// Index first pair after match end.
+				if int(s+l+8) < len(src) {
+					cv := load6432(src, s)
+					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+				}
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load6432(src, s-2)
+			o := e.cur + s - 2
+			prevHash := hashLen(x, tableBits, hashBytes)
+			e.table[prevHash] = tableEntry{offset: o}
+			x >>= 16
+			currHash := hashLen(x, tableBits, hashBytes)
+			candidate = e.table[currHash]
+			e.table[currHash] = tableEntry{offset: o + 2}
+
+			t = candidate.offset - e.cur
+			if s-t > maxMatchOffset || uint32(x) != load3232(src, t) {
+				cv = x >> 8
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
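For reference, the inner search loop above backs off the longer it goes without finding a match: the distance to the next probe is doEvery + (s-nextEmit)>>skipLog. A minimal standalone sketch of that schedule, reusing level 1's constants (skipLog = 5, doEvery = 2) with made-up positions, could look like this:

package main

import "fmt"

// Sketch of the skip schedule used by the level 1 search loop: the probe
// distance grows by one for every 1<<skipLog bytes scanned without a match.
func main() {
	const skipLog = 5
	const doEvery = 2
	nextEmit := int32(0)
	for _, s := range []int32{0, 31, 32, 64, 128, 256, 512} {
		step := doEvery + (s-nextEmit)>>skipLog
		fmt.Printf("unmatched=%3d bytes -> next probe in %d bytes\n", s-nextEmit, step)
	}
}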
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
new file mode 100644
index 0000000..c8d047f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -0,0 +1,214 @@
+package flate
+
+import "fmt"
+
+// fastEncL2 maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL2 struct {
+	fastGen
+	table [bTableSize]tableEntry
+}
+
+// Encode uses a similar algorithm to level 1, but is capable
+// of matching across blocks, giving better compression at a small slowdown.
+func (e *fastEncL2) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashBytes              = 5
+	)
+
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		// When should we start skipping if we haven't found matches in a long while.
+		const skipLog = 5
+		const doEvery = 2
+
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashLen(cv, bTableBits, hashBytes)
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = e.table[nextHash]
+			now := load6432(src, nextS)
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+			nextHash = hashLen(now, bTableBits, hashBytes)
+
+			offset := s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+
+			// Do one right away...
+			cv = now
+			s = nextS
+			nextS++
+			candidate = e.table[nextHash]
+			now >>= 8
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+			offset = s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				break
+			}
+			cv = now
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			t := candidate.offset - e.cur
+			l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+
+			if s >= sLimit {
+				// Index first pair after match end.
+				if int(s+l+8) < len(src) {
+					cv := load6432(src, s)
+					e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+				}
+				goto emitRemainder
+			}
+
+			// Store every second hash in-between, but offset by 1.
+			for i := s - l + 2; i < s-5; i += 7 {
+				x := load6432(src, i)
+				nextHash := hashLen(x, bTableBits, hashBytes)
+				e.table[nextHash] = tableEntry{offset: e.cur + i}
+				// Skip one
+				x >>= 16
+				nextHash = hashLen(x, bTableBits, hashBytes)
+				e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
+				// Skip one
+				x >>= 16
+				nextHash = hashLen(x, bTableBits, hashBytes)
+				e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 to s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load6432(src, s-2)
+			o := e.cur + s - 2
+			prevHash := hashLen(x, bTableBits, hashBytes)
+			prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
+			e.table[prevHash] = tableEntry{offset: o}
+			e.table[prevHash2] = tableEntry{offset: o + 1}
+			currHash := hashLen(x>>16, bTableBits, hashBytes)
+			candidate = e.table[currHash]
+			e.table[currHash] = tableEntry{offset: o + 2}
+
+			offset := s - (candidate.offset - e.cur)
+			if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
+				cv = x >> 24
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
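Both levels also guard against e.cur wrapping by rebasing table offsets once e.cur reaches bufferReset: entries older than the match window are dropped and the rest are shifted so they stay valid after the reset. A small sketch of that rebase on a plain slice of offsets (cur, histLen and the window size below are illustrative values) could look like this:

package main

import "fmt"

// Sketch of the offset rebase done before e.cur is reset: entries outside
// the match window are zeroed, the rest are shifted relative to the new cur.
func main() {
	const maxMatchOffset = 1 << 15 // window size assumed for the sketch
	cur, histLen := int32(1<<20), int32(4096)
	offsets := []int32{cur - 100, cur - maxMatchOffset, cur + 1000, 500}

	minOff := cur + histLen - maxMatchOffset
	for i, v := range offsets {
		if v <= minOff {
			offsets[i] = 0 // too far back to ever match again
		} else {
			offsets[i] = v - cur + maxMatchOffset // rebase to the new cur
		}
	}
	fmt.Println(offsets) // [32668 0 33768 0]
}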
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
new file mode 100644
index 0000000..33f9fb1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -0,0 +1,241 @@
+package flate
+
+import "fmt"
+
+// fastEncL3 is the level 3 encoder, which keeps two candidates per hash bucket.
+type fastEncL3 struct {
+	fastGen
+	table [1 << 16]tableEntryPrev
+}
+
+// Encode uses a similar algorithm to level 2, but will check up to two candidates per hash bucket.
+func (e *fastEncL3) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		tableBits              = 16
+		tableSize              = 1 << tableBits
+		hashBytes              = 5
+	)
+
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntryPrev{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i]
+			if v.Cur.offset <= minOff {
+				v.Cur.offset = 0
+			} else {
+				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+			}
+			if v.Prev.offset <= minOff {
+				v.Prev.offset = 0
+			} else {
+				v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+			}
+			e.table[i] = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// Skip if too small.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		const skipLog = 7
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashLen(cv, tableBits, hashBytes)
+			s = nextS
+			nextS = s + 1 + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidates := e.table[nextHash]
+			now := load6432(src, nextS)
+
+			// Safe offset distance until s + 4...
+			minOffset := e.cur + s - (maxMatchOffset - 4)
+			e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
+
+			// Check both candidates
+			candidate = candidates.Cur
+			if candidate.offset < minOffset {
+				cv = now
+				// Previous will also be invalid, we have nothing.
+				continue
+			}
+
+			if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
+					break
+				}
+				// Both match and are valid, pick longest.
+				offset := s - (candidate.offset - e.cur)
+				o2 := s - (candidates.Prev.offset - e.cur)
+				l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
+				if l2 > l1 {
+					candidate = candidates.Prev
+				}
+				break
+			} else {
+				// We only check if value mismatches.
+				// Offset will always be invalid in other cases.
+				candidate = candidates.Prev
+				if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+					break
+				}
+			}
+			cv = now
+		}
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			//
+			t := candidate.offset - e.cur
+			l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+
+			if s >= sLimit {
+				t += l
+				// Index first pair after match end.
+				if int(t+8) < len(src) && t > 0 {
+					cv = load6432(src, t)
+					nextHash := hashLen(cv, tableBits, hashBytes)
+					e.table[nextHash] = tableEntryPrev{
+						Prev: e.table[nextHash].Cur,
+						Cur:  tableEntry{offset: e.cur + t},
+					}
+				}
+				goto emitRemainder
+			}
+
+			// Store every 5th hash in-between.
+			for i := s - l + 2; i < s-5; i += 6 {
+				nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
+				e.table[nextHash] = tableEntryPrev{
+					Prev: e.table[nextHash].Cur,
+					Cur:  tableEntry{offset: e.cur + i}}
+			}
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 to s.
+			x := load6432(src, s-2)
+			prevHash := hashLen(x, tableBits, hashBytes)
+
+			e.table[prevHash] = tableEntryPrev{
+				Prev: e.table[prevHash].Cur,
+				Cur:  tableEntry{offset: e.cur + s - 2},
+			}
+			x >>= 8
+			prevHash = hashLen(x, tableBits, hashBytes)
+
+			e.table[prevHash] = tableEntryPrev{
+				Prev: e.table[prevHash].Cur,
+				Cur:  tableEntry{offset: e.cur + s - 1},
+			}
+			x >>= 8
+			currHash := hashLen(x, tableBits, hashBytes)
+			candidates := e.table[currHash]
+			cv = x
+			e.table[currHash] = tableEntryPrev{
+				Prev: candidates.Cur,
+				Cur:  tableEntry{offset: s + e.cur},
+			}
+
+			// Check both candidates
+			candidate = candidates.Cur
+			minOffset := e.cur + s - (maxMatchOffset - 4)
+
+			if candidate.offset > minOffset {
+				if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+					// Found a match...
+					continue
+				}
+				candidate = candidates.Prev
+				if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+					// Match at prev...
+					continue
+				}
+			}
+			cv = x >> 8
+			s++
+			break
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
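Level 3 differs from the lower levels by keeping two candidates per hash bucket (Cur and Prev), so the previous occupant of a bucket survives one more insertion and can still be compared. A simplified sketch of that two-slot insert, using stand-in types rather than the encoder's own, could look like this:

package main

import "fmt"

// Simplified two-slot hash bucket as used by the level 3 table:
// every insert pushes the current candidate into Prev.
type entry struct{ offset int32 }

type bucket struct{ Cur, Prev entry }

func (b *bucket) insert(off int32) {
	b.Prev, b.Cur = b.Cur, entry{offset: off}
}

func main() {
	var b bucket
	for _, off := range []int32{10, 20, 30} {
		b.insert(off)
	}
	// Both the newest and the second-newest positions remain available.
	fmt.Printf("Cur=%d Prev=%d\n", b.Cur.offset, b.Prev.offset) // Cur=30 Prev=20
}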
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
new file mode 100644
index 0000000..88509e1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -0,0 +1,221 @@
+package flate
+
+import "fmt"
+
+type fastEncL4 struct {
+	fastGen
+	table  [tableSize]tableEntry
+	bTable [tableSize]tableEntry
+}
+
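+// Encode uses both a short (4-byte) and a long (7-byte) hash table,
+// checking the long candidate first.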
+func (e *fastEncL4) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashShortBytes         = 4
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.bTable[:] {
+				e.bTable[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.bTable[:] {
+			v := e.bTable[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.bTable[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		const skipLog = 6
+		const doEvery = 1
+
+		nextS := s
+		var t int32
+		for {
+			nextHashS := hashLen(cv, tableBits, hashShortBytes)
+			nextHashL := hash7(cv, tableBits)
+
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			// Fetch a short+long candidate
+			sCandidate := e.table[nextHashS]
+			lCandidate := e.bTable[nextHashL]
+			next := load6432(src, nextS)
+			entry := tableEntry{offset: s + e.cur}
+			e.table[nextHashS] = entry
+			e.bTable[nextHashL] = entry
+
+			t = lCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// We got a long match. Use that.
+				break
+			}
+
+			t = sCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// Found a 4 match...
+				lCandidate = e.bTable[hash7(next, tableBits)]
+
+				// If the next long is a candidate, check if we should use that instead...
+				lOff := lCandidate.offset - e.cur
+				if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) {
+					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
+					if l2 > l1 {
+						s = nextS
+						t = lCandidate.offset - e.cur
+					}
+				}
+				break
+			}
+			cv = next
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		// Extend the 4-byte match as long as possible.
+		l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+		// Extend backwards
+		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+		if nextEmit < s {
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
+		}
+		if debugDeflate {
+			if t >= s {
+				panic("s-t")
+			}
+			if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", t))
+			}
+			if l < baseMatchLength {
+				panic("bml")
+			}
+		}
+
+		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+		s += l
+		nextEmit = s
+		if nextS >= s {
+			s = nextS + 1
+		}
+
+		if s >= sLimit {
+			// Index first pair after match end.
+			if int(s+8) < len(src) {
+				cv := load6432(src, s)
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
+				e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
+			}
+			goto emitRemainder
+		}
+
+		// Store every 3rd hash in-between
+		if true {
+			i := nextS
+			if i < s-1 {
+				cv := load6432(src, i)
+				t := tableEntry{offset: i + e.cur}
+				t2 := tableEntry{offset: t.offset + 1}
+				e.bTable[hash7(cv, tableBits)] = t
+				e.bTable[hash7(cv>>8, tableBits)] = t2
+				e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+
+				i += 3
+				for ; i < s-1; i += 3 {
+					cv := load6432(src, i)
+					t := tableEntry{offset: i + e.cur}
+					t2 := tableEntry{offset: t.offset + 1}
+					e.bTable[hash7(cv, tableBits)] = t
+					e.bTable[hash7(cv>>8, tableBits)] = t2
+					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+				}
+			}
+		}
+
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-1 and at s.
+		x := load6432(src, s-1)
+		o := e.cur + s - 1
+		prevHashS := hashLen(x, tableBits, hashShortBytes)
+		prevHashL := hash7(x, tableBits)
+		e.table[prevHashS] = tableEntry{offset: o}
+		e.bTable[prevHashL] = tableEntry{offset: o}
+		cv = x >> 8
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
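Level 4 hashes each position twice: a 4-byte hash into the short table and a 7-byte hash (hash7) into the long table, checking the long candidate first. The sketch below shows how a single 64-bit load can feed hashes of two different prefix lengths; the multipliers and table size are placeholders, not the library's actual constants.

package main

import "fmt"

const tableBits = 15 // placeholder table size

// hashShort hashes only the low 4 bytes of u into tableBits bits.
func hashShort(u uint64) uint32 {
	return uint32(((u << 32) * 0x9E3779B1) >> (64 - tableBits))
}

// hashLong hashes only the low 7 bytes of u into tableBits bits.
func hashLong(u uint64) uint32 {
	return uint32(((u << 8) * 0x9E3779B185EBCA87) >> (64 - tableBits))
}

func main() {
	cv := uint64(0x0706050403020100)
	fmt.Printf("short=%d long=%d\n", hashShort(cv), hashLong(cv))
}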
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
new file mode 100644
index 0000000..6e5c215
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -0,0 +1,708 @@
+package flate
+
+import "fmt"
+
+type fastEncL5 struct {
+	fastGen
+	table  [tableSize]tableEntry
+	bTable [tableSize]tableEntryPrev
+}
+
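+// Encode is similar to level 4, but the long hash table keeps both the
+// current and the previous candidate for each bucket.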
+func (e *fastEncL5) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashShortBytes         = 4
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.bTable[:] {
+				e.bTable[i] = tableEntryPrev{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.bTable[:] {
+			v := e.bTable[i]
+			if v.Cur.offset <= minOff {
+				v.Cur.offset = 0
+				v.Prev.offset = 0
+			} else {
+				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+				if v.Prev.offset <= minOff {
+					v.Prev.offset = 0
+				} else {
+					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+				}
+			}
+			e.bTable[i] = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		const skipLog = 6
+		const doEvery = 1
+
+		nextS := s
+		var l int32
+		var t int32
+		for {
+			nextHashS := hashLen(cv, tableBits, hashShortBytes)
+			nextHashL := hash7(cv, tableBits)
+
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			// Fetch a short+long candidate
+			sCandidate := e.table[nextHashS]
+			lCandidate := e.bTable[nextHashL]
+			next := load6432(src, nextS)
+			entry := tableEntry{offset: s + e.cur}
+			e.table[nextHashS] = entry
+			eLong := &e.bTable[nextHashL]
+			eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+			nextHashS = hashLen(next, tableBits, hashShortBytes)
+			nextHashL = hash7(next, tableBits)
+
+			t = lCandidate.Cur.offset - e.cur
+			if s-t < maxMatchOffset {
+				if uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+					t2 := lCandidate.Prev.offset - e.cur
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+						l = e.matchlen(int(s+4), int(t+4), src) + 4
+						ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
+						if ml1 > l {
+							t = t2
+							l = ml1
+							break
+						}
+					}
+					break
+				}
+				t = lCandidate.Prev.offset - e.cur
+				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+					break
+				}
+			}
+
+			t = sCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// Found a 4 match...
+				l = e.matchlen(int(s+4), int(t+4), src) + 4
+				lCandidate = e.bTable[nextHashL]
+				// Store the next match
+
+				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+				eLong := &e.bTable[nextHashL]
+				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+				// If the next long is a candidate, use that...
+				t2 := lCandidate.Cur.offset - e.cur
+				if nextS-t2 < maxMatchOffset {
+					if load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+					// If the previous long is a candidate, use that...
+					t2 = lCandidate.Prev.offset - e.cur
+					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+				}
+				break
+			}
+			cv = next
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		if l == 0 {
+			// Extend the 4-byte match as long as possible.
+			l = e.matchlenLong(int(s+4), int(t+4), src) + 4
+		} else if l == maxMatchLength {
+			l += e.matchlenLong(int(s+l), int(t+l), src)
+		}
+
+		// Try to locate a better match by checking the end of the best match...
+		if sAt := s + l; l < 30 && sAt < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// The sweet spot is 2 or 3 bytes, depending on the input;
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in the "Extend backwards" step below,
+			// and are still picked up as part of the match if they do match.
+			const skipBeginning = 2
+			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+			t2 := eLong - e.cur - l + skipBeginning
+			s2 := s + skipBeginning
+			off := s2 - t2
+			if t2 >= 0 && off < maxMatchOffset && off > 0 {
+				if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+					t = t2
+					l = l2
+					s = s2
+				}
+			}
+		}
+
+		// Extend backwards
+		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+		if nextEmit < s {
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
+		}
+		if debugDeflate {
+			if t >= s {
+				panic(fmt.Sprintln("s-t", s, t))
+			}
+			if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", s-t))
+			}
+			if l < baseMatchLength {
+				panic("bml")
+			}
+		}
+
+		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+		s += l
+		nextEmit = s
+		if nextS >= s {
+			s = nextS + 1
+		}
+
+		if s >= sLimit {
+			goto emitRemainder
+		}
+
+		// Store every 3rd hash in-between.
+		if true {
+			const hashEvery = 3
+			i := s - l + 1
+			if i < s-1 {
+				cv := load6432(src, i)
+				t := tableEntry{offset: i + e.cur}
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+				eLong := &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// Add a long-table entry at i+1
+				cv >>= 8
+				t = tableEntry{offset: t.offset + 1}
+				eLong = &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// We only have enough bits for a short entry at i+2
+				cv >>= 8
+				t = tableEntry{offset: t.offset + 1}
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+
+				// Skip one - otherwise we risk hitting 's'
+				i += 4
+				for ; i < s-1; i += hashEvery {
+					cv := load6432(src, i)
+					t := tableEntry{offset: i + e.cur}
+					t2 := tableEntry{offset: t.offset + 1}
+					eLong := &e.bTable[hash7(cv, tableBits)]
+					eLong.Cur, eLong.Prev = t, eLong.Cur
+					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+				}
+			}
+		}
+
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-1 and at s.
+		x := load6432(src, s-1)
+		o := e.cur + s - 1
+		prevHashS := hashLen(x, tableBits, hashShortBytes)
+		prevHashL := hash7(x, tableBits)
+		e.table[prevHashS] = tableEntry{offset: o}
+		eLong := &e.bTable[prevHashL]
+		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+		cv = x >> 8
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
+
+// fastEncL5Window is a level 5 encoder,
+// but with a custom window size.
+type fastEncL5Window struct {
+	hist      []byte
+	cur       int32
+	maxOffset int32
+	table     [tableSize]tableEntry
+	bTable    [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashShortBytes         = 4
+	)
+	maxMatchOffset := e.maxOffset
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.bTable[:] {
+				e.bTable[i] = tableEntryPrev{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.bTable[:] {
+			v := e.bTable[i]
+			if v.Cur.offset <= minOff {
+				v.Cur.offset = 0
+				v.Prev.offset = 0
+			} else {
+				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+				if v.Prev.offset <= minOff {
+					v.Prev.offset = 0
+				} else {
+					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+				}
+			}
+			e.bTable[i] = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		const skipLog = 6
+		const doEvery = 1
+
+		nextS := s
+		var l int32
+		var t int32
+		for {
+			nextHashS := hashLen(cv, tableBits, hashShortBytes)
+			nextHashL := hash7(cv, tableBits)
+
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			// Fetch a short+long candidate
+			sCandidate := e.table[nextHashS]
+			lCandidate := e.bTable[nextHashL]
+			next := load6432(src, nextS)
+			entry := tableEntry{offset: s + e.cur}
+			e.table[nextHashS] = entry
+			eLong := &e.bTable[nextHashL]
+			eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+			nextHashS = hashLen(next, tableBits, hashShortBytes)
+			nextHashL = hash7(next, tableBits)
+
+			t = lCandidate.Cur.offset - e.cur
+			if s-t < maxMatchOffset {
+				if uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+					t2 := lCandidate.Prev.offset - e.cur
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+						l = e.matchlen(s+4, t+4, src) + 4
+						ml1 := e.matchlen(s+4, t2+4, src) + 4
+						if ml1 > l {
+							t = t2
+							l = ml1
+							break
+						}
+					}
+					break
+				}
+				t = lCandidate.Prev.offset - e.cur
+				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+					break
+				}
+			}
+
+			t = sCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// Found a 4 match...
+				l = e.matchlen(s+4, t+4, src) + 4
+				lCandidate = e.bTable[nextHashL]
+				// Store the next match
+
+				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+				eLong := &e.bTable[nextHashL]
+				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+				// If the next long is a candidate, use that...
+				t2 := lCandidate.Cur.offset - e.cur
+				if nextS-t2 < maxMatchOffset {
+					if load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(nextS+4, t2+4, src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+					// If the previous long is a candidate, use that...
+					t2 = lCandidate.Prev.offset - e.cur
+					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(nextS+4, t2+4, src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+				}
+				break
+			}
+			cv = next
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		if l == 0 {
+			// Extend the 4-byte match as long as possible.
+			l = e.matchlenLong(s+4, t+4, src) + 4
+		} else if l == maxMatchLength {
+			l += e.matchlenLong(s+l, t+l, src)
+		}
+
+		// Try to locate a better match by checking the end of the best match...
+		if sAt := s + l; l < 30 && sAt < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// The sweet spot is 2 or 3 bytes, depending on the input;
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in the "Extend backwards" step below,
+			// and are still picked up as part of the match if they do match.
+			const skipBeginning = 2
+			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+			t2 := eLong - e.cur - l + skipBeginning
+			s2 := s + skipBeginning
+			off := s2 - t2
+			if t2 >= 0 && off < maxMatchOffset && off > 0 {
+				if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+					t = t2
+					l = l2
+					s = s2
+				}
+			}
+		}
+
+		// Extend backwards
+		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+		if nextEmit < s {
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
+		}
+		if debugDeflate {
+			if t >= s {
+				panic(fmt.Sprintln("s-t", s, t))
+			}
+			if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", s-t))
+			}
+			if l < baseMatchLength {
+				panic("bml")
+			}
+		}
+
+		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+		s += l
+		nextEmit = s
+		if nextS >= s {
+			s = nextS + 1
+		}
+
+		if s >= sLimit {
+			goto emitRemainder
+		}
+
+		// Store every 3rd hash in-between.
+		if true {
+			const hashEvery = 3
+			i := s - l + 1
+			if i < s-1 {
+				cv := load6432(src, i)
+				t := tableEntry{offset: i + e.cur}
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+				eLong := &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// Add a long-table entry at i+1
+				cv >>= 8
+				t = tableEntry{offset: t.offset + 1}
+				eLong = &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// We only have enough bits for a short entry at i+2
+				cv >>= 8
+				t = tableEntry{offset: t.offset + 1}
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+
+				// Skip one - otherwise we risk hitting 's'
+				i += 4
+				for ; i < s-1; i += hashEvery {
+					cv := load6432(src, i)
+					t := tableEntry{offset: i + e.cur}
+					t2 := tableEntry{offset: t.offset + 1}
+					eLong := &e.bTable[hash7(cv, tableBits)]
+					eLong.Cur, eLong.Prev = t, eLong.Cur
+					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+				}
+			}
+		}
+
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-1 and at s.
+		x := load6432(src, s-1)
+		o := e.cur + s - 1
+		prevHashS := hashLen(x, tableBits, hashShortBytes)
+		prevHashL := hash7(x, tableBits)
+		e.table[prevHashS] = tableEntry{offset: o}
+		eLong := &e.bTable[prevHashL]
+		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+		cv = x >> 8
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
+
+// Reset the encoding table.
+func (e *fastEncL5Window) Reset() {
+	// We keep the same allocs, since we are compressing the same block sizes.
+	if cap(e.hist) < allocHistory {
+		e.hist = make([]byte, 0, allocHistory)
+	}
+
+	// We offset current position so everything will be out of reach.
+	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+	if e.cur <= int32(bufferReset) {
+		e.cur += e.maxOffset + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+}
+
+func (e *fastEncL5Window) addBlock(src []byte) int32 {
+	// check if we have space already
+	maxMatchOffset := e.maxOffset
+
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.hist = make([]byte, 0, allocHistory)
+		} else {
+			if cap(e.hist) < int(maxMatchOffset*2) {
+				panic("unexpected buffer size")
+			}
+			// Move down
+			offset := int32(len(e.hist)) - maxMatchOffset
+			copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:maxMatchOffset]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0 and that s < len(src).
+func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
+	if debugDecode {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > e.maxOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+		}
+	}
+	s1 := int(s) + maxMatchLength - 4
+	if s1 > len(src) {
+		s1 = len(src)
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0 and that s < len(src).
+func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
+	if debugDeflate {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > e.maxOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+		}
+	}
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
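fastEncL5Window bounds its history to maxOffset bytes: when addBlock runs out of room it copies the tail of the history down and advances e.cur by the number of bytes discarded, as in the method above. A standalone sketch of that slide, with a tiny window chosen only for readability, could look like this:

package main

import "fmt"

// Sketch of the history slide done by addBlock when the buffer is full:
// keep the last maxOffset bytes and bump cur by the amount dropped.
func main() {
	const maxOffset = 4 // tiny window, for illustration only
	hist := []byte("abcdefgh")
	cur := int32(0)

	if len(hist) > maxOffset {
		drop := int32(len(hist)) - maxOffset
		copy(hist[:maxOffset], hist[drop:])
		hist = hist[:maxOffset]
		cur += drop
	}
	fmt.Printf("hist=%q cur=%d\n", hist, cur) // hist="efgh" cur=4
}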
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
new file mode 100644
index 0000000..96f5bb4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -0,0 +1,325 @@
+package flate
+
+import "fmt"
+
+type fastEncL6 struct {
+	fastGen
+	table  [tableSize]tableEntry
+	bTable [tableSize]tableEntryPrev
+}
+
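+// Encode is similar to level 5, but additionally checks whether the offset
+// of the previous match repeats at the current position.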
+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashShortBytes         = 4
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.bTable[:] {
+				e.bTable[i] = tableEntryPrev{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.bTable[:] {
+			v := e.bTable[i]
+			if v.Cur.offset <= minOff {
+				v.Cur.offset = 0
+				v.Prev.offset = 0
+			} else {
+				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+				if v.Prev.offset <= minOff {
+					v.Prev.offset = 0
+				} else {
+					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+				}
+			}
+			e.bTable[i] = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	// Repeat MUST be > 1 and within range
+	repeat := int32(1)
+	for {
+		const skipLog = 7
+		const doEvery = 1
+
+		nextS := s
+		var l int32
+		var t int32
+		for {
+			nextHashS := hashLen(cv, tableBits, hashShortBytes)
+			nextHashL := hash7(cv, tableBits)
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			// Fetch a short+long candidate
+			sCandidate := e.table[nextHashS]
+			lCandidate := e.bTable[nextHashL]
+			next := load6432(src, nextS)
+			entry := tableEntry{offset: s + e.cur}
+			e.table[nextHashS] = entry
+			eLong := &e.bTable[nextHashL]
+			eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+			// Calculate hashes of 'next'
+			nextHashS = hashLen(next, tableBits, hashShortBytes)
+			nextHashL = hash7(next, tableBits)
+
+			t = lCandidate.Cur.offset - e.cur
+			if s-t < maxMatchOffset {
+				if uint32(cv) == load3232(src, t) {
+					// Long candidate matches at least 4 bytes.
+
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+					// Check the previous long candidate as well.
+					t2 := lCandidate.Prev.offset - e.cur
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+						l = e.matchlen(int(s+4), int(t+4), src) + 4
+						ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
+						if ml1 > l {
+							t = t2
+							l = ml1
+							break
+						}
+					}
+					break
+				}
+				// Current value did not match, but check if previous long value does.
+				t = lCandidate.Prev.offset - e.cur
+				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+					break
+				}
+			}
+
+			t = sCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// Found a 4 match...
+				l = e.matchlen(int(s+4), int(t+4), src) + 4
+
+				// Look up next long candidate (at nextS)
+				lCandidate = e.bTable[nextHashL]
+
+				// Store the next match
+				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+				eLong := &e.bTable[nextHashL]
+				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+				// Check repeat at s + repOff
+				const repOff = 1
+				t2 := s - repeat + repOff
+				if load3232(src, t2) == uint32(cv>>(8*repOff)) {
+					ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4
+					if ml > l {
+						t = t2
+						l = ml
+						s += repOff
+						// Not worth checking more.
+						break
+					}
+				}
+
+				// If the next long is a candidate, use that...
+				t2 = lCandidate.Cur.offset - e.cur
+				if nextS-t2 < maxMatchOffset {
+					if load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							// This is ok, but check previous as well.
+						}
+					}
+					// If the previous long is a candidate, use that...
+					t2 = lCandidate.Prev.offset - e.cur
+					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+				}
+				break
+			}
+			cv = next
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		// Extend the 4-byte match as long as possible.
+		if l == 0 {
+			l = e.matchlenLong(int(s+4), int(t+4), src) + 4
+		} else if l == maxMatchLength {
+			l += e.matchlenLong(int(s+l), int(t+l), src)
+		}
+
+		// Try to locate a better match by checking the end-of-match...
+		if sAt := s + l; sAt < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// The sweet spot is 2 or 3 bytes, depending on the input;
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in the "Extend backwards" step below,
+			// and are still picked up as part of the match if they do match.
+			const skipBeginning = 2
+			eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
+			// Test current
+			t2 := eLong.Cur.offset - e.cur - l + skipBeginning
+			s2 := s + skipBeginning
+			off := s2 - t2
+			if off < maxMatchOffset {
+				if off > 0 && t2 >= 0 {
+					if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+						t = t2
+						l = l2
+						s = s2
+					}
+				}
+				// Test next:
+				t2 = eLong.Prev.offset - e.cur - l + skipBeginning
+				off := s2 - t2
+				if off > 0 && off < maxMatchOffset && t2 >= 0 {
+					if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+						t = t2
+						l = l2
+						s = s2
+					}
+				}
+			}
+		}
+
+		// Extend backwards
+		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+		if nextEmit < s {
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
+		}
+		if false {
+			if t >= s {
+				panic(fmt.Sprintln("s-t", s, t))
+			}
+			if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", s-t))
+			}
+			if l < baseMatchLength {
+				panic("bml")
+			}
+		}
+
+		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+		repeat = s - t
+		s += l
+		nextEmit = s
+		if nextS >= s {
+			s = nextS + 1
+		}
+
+		if s >= sLimit {
+			// Index after match end.
+			for i := nextS + 1; i < int32(len(src))-8; i += 2 {
+				cv := load6432(src, i)
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
+				eLong := &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
+			}
+			goto emitRemainder
+		}
+
+		// Store every long hash in-between and every second short.
+		if true {
+			for i := nextS + 1; i < s-1; i += 2 {
+				cv := load6432(src, i)
+				t := tableEntry{offset: i + e.cur}
+				t2 := tableEntry{offset: t.offset + 1}
+				eLong := &e.bTable[hash7(cv, tableBits)]
+				eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+				eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
+			}
+		}
+
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-1 and at s.
+		cv = load6432(src, s)
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
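Compared with level 5, level 6 also remembers the distance of the previous match (repeat) and probes whether the same distance matches again one byte further on, which is cheap and pays off on repetitive data. A minimal sketch of that probe on plain byte slices, with made-up positions and repeat distance, could look like this:

package main

import (
	"bytes"
	"fmt"
)

// Sketch of the repeat-offset probe used by level 6: after a match at
// distance "repeat", check whether the same distance also matches at s+1.
func main() {
	src := []byte("abcXYZabcXYZabcXYZ")
	repeat := 6      // distance of the previous match (assumed)
	s := 12          // current position (assumed)
	const repOff = 1 // probe one byte ahead, as in the encoder above
	t := s + repOff - repeat
	if bytes.Equal(src[s+repOff:s+repOff+4], src[t:t+4]) {
		fmt.Printf("repeat offset %d still matches at s+%d\n", repeat, repOff)
	}
}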
diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
new file mode 100644
index 0000000..6149384
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
@@ -0,0 +1,34 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shorter of the two.
+func matchLen(a, b []byte) (n int) {
+	left := len(a)
+	for left >= 8 {
+		diff := le.Load64(a, n) ^ le.Load64(b, n)
+		if diff != 0 {
+			return n + bits.TrailingZeros64(diff)>>3
+		}
+		n += 8
+		left -= 8
+	}
+
+	a = a[n:]
+	b = b[n:]
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
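matchLen compares eight bytes at a time: after XOR-ing two little-endian 64-bit loads, the index of the first differing byte is TrailingZeros64(diff)>>3. A small standalone check of that identity could look like this:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// Verify the trick used by matchLen: XOR two 8-byte words, and the index of
// the first differing byte is TrailingZeros64(diff)/8.
func main() {
	a := []byte("deflate!")
	b := []byte("defLate!")
	diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	fmt.Println("common prefix bytes:", bits.TrailingZeros64(diff)>>3) // 3
}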
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
new file mode 100644
index 0000000..6ed2806
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
@@ -0,0 +1,37 @@
+package flate
+
+const (
+	// Masks for shift counts, sized to the register holding the shift value.
+	// They can be used to work around x86 reducing shift counts modulo the register size.
+	// They are only safe when a variable shift is always smaller than the register size.
+
+	// reg8SizeMaskX - shift value is 8 bits, shifted is X
+	reg8SizeMask8  = 7
+	reg8SizeMask16 = 15
+	reg8SizeMask32 = 31
+	reg8SizeMask64 = 63
+
+	// reg16SizeMaskX - shift value is 16 bits, shifted is X
+	reg16SizeMask8  = reg8SizeMask8
+	reg16SizeMask16 = reg8SizeMask16
+	reg16SizeMask32 = reg8SizeMask32
+	reg16SizeMask64 = reg8SizeMask64
+
+	// reg32SizeMaskX - shift value is 32 bits, shifted is X
+	reg32SizeMask8  = reg8SizeMask8
+	reg32SizeMask16 = reg8SizeMask16
+	reg32SizeMask32 = reg8SizeMask32
+	reg32SizeMask64 = reg8SizeMask64
+
+	// reg64SizeMaskX - shift value is 64 bits, shifted is X
+	reg64SizeMask8  = reg8SizeMask8
+	reg64SizeMask16 = reg8SizeMask16
+	reg64SizeMask32 = reg8SizeMask32
+	reg64SizeMask64 = reg8SizeMask64
+
+	// regSizeMaskUintX - shift value is uint, shifted is X
+	regSizeMaskUint8  = reg8SizeMask8
+	regSizeMaskUint16 = reg8SizeMask16
+	regSizeMaskUint32 = reg8SizeMask32
+	regSizeMaskUint64 = reg8SizeMask64
+)
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go
new file mode 100644
index 0000000..1b7a2cb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go
@@ -0,0 +1,40 @@
+//go:build !amd64
+// +build !amd64
+
+package flate
+
+const (
+	// Masks for shift counts, sized to the register holding the shift value.
+	// They can be used to work around x86 reducing shift counts modulo the register size.
+	// They are only safe when a variable shift is always smaller than the register size.
+
+	// reg8SizeMaskX - shift value is 8 bits, shifted is X
+	reg8SizeMask8  = 0xff
+	reg8SizeMask16 = 0xff
+	reg8SizeMask32 = 0xff
+	reg8SizeMask64 = 0xff
+
+	// reg16SizeMaskX - shift value is 16 bits, shifted is X
+	reg16SizeMask8  = 0xffff
+	reg16SizeMask16 = 0xffff
+	reg16SizeMask32 = 0xffff
+	reg16SizeMask64 = 0xffff
+
+	// reg32SizeMaskX - shift value is 32 bits, shifted is X
+	reg32SizeMask8  = 0xffffffff
+	reg32SizeMask16 = 0xffffffff
+	reg32SizeMask32 = 0xffffffff
+	reg32SizeMask64 = 0xffffffff
+
+	// reg64SizeMaskX - shift value is 64 bits, shifted is X
+	reg64SizeMask8  = 0xffffffffffffffff
+	reg64SizeMask16 = 0xffffffffffffffff
+	reg64SizeMask32 = 0xffffffffffffffff
+	reg64SizeMask64 = 0xffffffffffffffff
+
+	// regSizeMaskUintX - shift value is uint, shifted is X
+	regSizeMaskUint8  = ^uint(0)
+	regSizeMaskUint16 = ^uint(0)
+	regSizeMaskUint32 = ^uint(0)
+	regSizeMaskUint64 = ^uint(0)
+)
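How these masks are consumed is not shown in this patch, but the usual pattern is presumably to mask a variable shift count before shifting: on amd64 the mask equals the register width minus one, matching what the hardware does anyway, while on other architectures it is all ones and changes nothing. A hedged sketch of that idiom, with the constant redeclared locally as a stand-in, could look like this:

package main

import "fmt"

// Hypothetical use of the shift masks defined above: mask the count before
// shifting so the compiler can omit its own bounds handling on amd64.
const regSizeMaskUint64 = 63 // stand-in for the amd64 constant

func shiftRight(v uint64, n uint) uint64 {
	return v >> (n & regSizeMaskUint64)
}

func main() {
	fmt.Println(shiftRight(1<<40, 8)) // 4294967296
}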
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
new file mode 100644
index 0000000..13b9b10
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -0,0 +1,313 @@
+package flate
+
+import (
+	"io"
+	"math"
+	"sync"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+const (
+	maxStatelessBlock = math.MaxInt16
+	// dictionary will be taken from maxStatelessBlock, so limit it.
+	maxStatelessDict = 8 << 10
+
+	slTableBits  = 13
+	slTableSize  = 1 << slTableBits
+	slTableShift = 32 - slTableBits
+)
+
+type statelessWriter struct {
+	dst    io.Writer
+	closed bool
+}
+
+func (s *statelessWriter) Close() error {
+	if s.closed {
+		return nil
+	}
+	s.closed = true
+	// Emit EOF block
+	return StatelessDeflate(s.dst, nil, true, nil)
+}
+
+func (s *statelessWriter) Write(p []byte) (n int, err error) {
+	err = StatelessDeflate(s.dst, p, false, nil)
+	if err != nil {
+		return 0, err
+	}
+	return len(p), nil
+}
+
+func (s *statelessWriter) Reset(w io.Writer) {
+	s.dst = w
+	s.closed = false
+}
+
+// NewStatelessWriter will do compression without maintaining any state
+// between Write calls.
+// Since no memory is kept between Write calls,
+// compression ratio and speed will be suboptimal,
+// and the size of individual Write calls will affect the output size.
+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
+	return &statelessWriter{dst: dst}
+}
+
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+	New: func() interface{} {
+		return newHuffmanBitWriter(nil)
+	},
+}
+
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
+// When it returns, everything will have been flushed.
+// Up to 8KB of an optional dictionary can be given, which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
+	var dst tokens
+	bw := bitWriterPool.Get().(*huffmanBitWriter)
+	bw.reset(out)
+	defer func() {
+		// don't keep a reference to our output
+		bw.reset(nil)
+		bitWriterPool.Put(bw)
+	}()
+	if eof && len(in) == 0 {
+		// Just write an EOF block.
+		// Could be faster...
+		bw.writeStoredHeader(0, true)
+		bw.flush()
+		return bw.err
+	}
+
+	// Truncate dict
+	if len(dict) > maxStatelessDict {
+		dict = dict[len(dict)-maxStatelessDict:]
+	}
+
+	// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
+	var inDict []byte
+
+	for len(in) > 0 {
+		todo := in
+		if len(inDict) > 0 {
+			if len(todo) > maxStatelessBlock-maxStatelessDict {
+				todo = todo[:maxStatelessBlock-maxStatelessDict]
+			}
+		} else if len(todo) > maxStatelessBlock-len(dict) {
+			todo = todo[:maxStatelessBlock-len(dict)]
+		}
+		inOrg := in
+		in = in[len(todo):]
+		uncompressed := todo
+		if len(dict) > 0 {
+			// combine dict and source
+			bufLen := len(todo) + len(dict)
+			combined := make([]byte, bufLen)
+			copy(combined, dict)
+			copy(combined[len(dict):], todo)
+			todo = combined
+		}
+		// Compress
+		if len(inDict) == 0 {
+			statelessEnc(&dst, todo, int16(len(dict)))
+		} else {
+			statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
+		}
+		isEof := eof && len(in) == 0
+
+		if dst.n == 0 {
+			bw.writeStoredHeader(len(uncompressed), isEof)
+			if bw.err != nil {
+				return bw.err
+			}
+			bw.writeBytes(uncompressed)
+		} else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
+			// If we removed less than 1/16th, huffman compress the block.
+			bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
+		} else {
+			bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+		}
+		if len(in) > 0 {
+			// Retain a dict if we have more
+			inDict = inOrg[len(uncompressed)-maxStatelessDict:]
+			dict = nil
+			dst.Reset()
+		}
+		if bw.err != nil {
+			return bw.err
+		}
+	}
+	if !eof {
+		// Align, only a stored block can do that.
+		bw.writeStoredHeader(0, false)
+	}
+	bw.flush()
+	return bw.err
+}
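
Because every call is independent, a caller that wants some cross-block history can pass the tail of the previously compressed data as dict; per the doc comment above, anything beyond the last 8 KiB is truncated. A hedged sketch of that pattern follows (record contents and sizes are made up):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/flate"
)

// compressRecords compresses each record with its own stateless call, passing
// the tail of the previous record as a dictionary so matches can reach back
// into it. The 8 KiB cap mirrors the documented dictionary limit.
func compressRecords(w io.Writer, records [][]byte) error {
	var dict []byte
	for i, rec := range records {
		eof := i == len(records)-1
		if err := flate.StatelessDeflate(w, rec, eof, dict); err != nil {
			return err
		}
		// Only the last 8 KiB of the dictionary is used, so keep at most that.
		dict = rec
		if len(dict) > 8<<10 {
			dict = dict[len(dict)-(8<<10):]
		}
	}
	return nil
}

func main() {
	var out bytes.Buffer
	recs := [][]byte{ // illustrative records with overlapping content
		[]byte("record one: shared boilerplate header"),
		[]byte("record two: shared boilerplate header plus a tail"),
	}
	if err := compressRecords(&out, recs); err != nil {
		log.Fatal(err)
	}
	fmt.Println("compressed to", out.Len(), "bytes")
}
```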
+
+func hashSL(u uint32) uint32 {
+	return (u * 0x1e35a7bd) >> slTableShift
+}
+
+func load3216(b []byte, i int16) uint32 {
+	return le.Load32(b, i)
+}
+
+func load6416(b []byte, i int16) uint64 {
+	return le.Load64(b, i)
+}
+
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+
+	type tableEntry struct {
+		offset int16
+	}
+
+	var table [slTableSize]tableEntry
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src)-int(startAt) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = 0
+		return
+	}
+	// Index until startAt
+	if startAt > 0 {
+		cv := load3232(src, 0)
+		for i := int16(0); i < startAt; i++ {
+			table[hashSL(cv)] = tableEntry{offset: i}
+			cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+		}
+	}
+
+	s := startAt + 1
+	nextEmit := startAt
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int16(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load3216(src, s)
+
+	for {
+		const skipLog = 5
+		const doEvery = 2
+
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashSL(cv)
+			candidate = table[nextHash]
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit || nextS <= 0 {
+				goto emitRemainder
+			}
+
+			now := load6416(src, nextS)
+			table[nextHash] = tableEntry{offset: s}
+			nextHash = hashSL(uint32(now))
+
+			if cv == load3216(src, candidate.offset) {
+				table[nextHash] = tableEntry{offset: nextS}
+				break
+			}
+
+			// Do one right away...
+			cv = uint32(now)
+			s = nextS
+			nextS++
+			candidate = table[nextHash]
+			now >>= 8
+			table[nextHash] = tableEntry{offset: s}
+
+			if cv == load3216(src, candidate.offset) {
+				table[nextHash] = tableEntry{offset: nextS}
+				break
+			}
+			cv = uint32(now)
+			s = nextS
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			t := candidate.offset
+			l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			// Save the match found
+			dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load6416(src, s-2)
+			o := s - 2
+			prevHash := hashSL(uint32(x))
+			table[prevHash] = tableEntry{offset: o}
+			x >>= 16
+			currHash := hashSL(uint32(x))
+			candidate = table[currHash]
+			table[currHash] = tableEntry{offset: o + 2}
+
+			if uint32(x) != load3216(src, candidate.offset) {
+				cv = uint32(x >> 8)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 0000000..d818790
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,379 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	// bits 0-15    xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+	// bits 16-20   offsetcode - 5 bits
+	// bits 22-29   xlength = length - MIN_MATCH_LENGTH - 8 bits
+	// bits 30-31   type   0 = literal  1=EOF  2=Match   3=Unused - 2 bits
+	lengthShift         = 22
+	offsetMask          = 1<<lengthShift - 1
+	typeMask            = 3 << 30
+	literalType         = 0 << 30
+	matchType           = 1 << 30
+	matchOffsetOnlyMask = 0xffff
+)
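
As a concrete reading of the layout above, the sketch below packs and unpacks a hypothetical match token using locally duplicated constants; the package itself defines lengthShift, offsetMask and matchType with the same values.

```go
package main

import "fmt"

// Constants duplicated locally for illustration only.
const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30
)

func main() {
	// Pack a hypothetical match token: xlength=20, offset code=3, xoffset=0x123.
	tok := uint32(matchType | 20<<lengthShift | 3<<16 | 0x123)

	fmt.Printf("type    : %d\n", tok>>30)                 // 1 = match
	fmt.Printf("xlength : %d\n", uint8(tok>>lengthShift)) // 20
	fmt.Printf("offcode : %d\n", (tok>>16)&0x1f)          // 3
	fmt.Printf("xoffset : %d\n", tok&0xffff)              // 0x123 = 291
	fmt.Printf("low 22  : %#x\n", tok&offsetMask)         // offset + code bits
}
```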
+
+// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
+// is lengthCodes[length - MIN_MATCH_LENGTH]
+var lengthCodes = [256]uint8{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
+	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
+	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 28,
+}
+
+// lengthCodes1 is length codes, but starting at 1.
+var lengthCodes1 = [256]uint8{
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
+	10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
+	14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
+	16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
+	18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
+	19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
+	20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
+	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+	22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
+	23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+	23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 29,
+}
+
+var offsetCodes = [256]uint32{
+	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+}
+
+// offsetCodes14 are offsetCodes, but with 14 added.
+var offsetCodes14 = [256]uint32{
+	14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+}
+
+type token uint32
+
+type tokens struct {
+	extraHist [32]uint16  // codes 256->maxnumlit
+	offHist   [32]uint16  // offset codes
+	litHist   [256]uint16 // codes 0->255
+	nFilled   int
+	n         uint16 // Must be able to contain maxStoreBlockSize
+	tokens    [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+	if t.n == 0 {
+		return
+	}
+	t.n = 0
+	t.nFilled = 0
+	for i := range t.litHist[:] {
+		t.litHist[i] = 0
+	}
+	for i := range t.extraHist[:] {
+		t.extraHist[i] = 0
+	}
+	for i := range t.offHist[:] {
+		t.offHist[i] = 0
+	}
+}
+
+func (t *tokens) Fill() {
+	if t.n == 0 {
+		return
+	}
+	for i, v := range t.litHist[:] {
+		if v == 0 {
+			t.litHist[i] = 1
+			t.nFilled++
+		}
+	}
+	for i, v := range t.extraHist[:literalCount-256] {
+		if v == 0 {
+			t.nFilled++
+			t.extraHist[i] = 1
+		}
+	}
+	for i, v := range t.offHist[:offsetCodeCount] {
+		if v == 0 {
+			t.offHist[i] = 1
+		}
+	}
+}
+
+func indexTokens(in []token) tokens {
+	var t tokens
+	t.indexTokens(in)
+	return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+	t.Reset()
+	for _, tok := range in {
+		if tok < matchType {
+			t.AddLiteral(tok.literal())
+			continue
+		}
+		t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
+	}
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+func emitLiteral(dst *tokens, lit []byte) {
+	for _, v := range lit {
+		dst.tokens[dst.n] = token(v)
+		dst.litHist[v]++
+		dst.n++
+	}
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+	t.tokens[t.n] = token(lit)
+	t.litHist[lit]++
+	t.n++
+}
+
+// from https://stackoverflow.com/a/28730362
+func mFastLog2(val float32) float32 {
+	ux := int32(math.Float32bits(val))
+	log2 := (float32)(((ux >> 23) & 255) - 128)
+	ux &= -0x7f800001
+	ux += 127 << 23
+	uval := math.Float32frombits(uint32(ux))
+	log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
+	return log2
+}
+
+// EstimatedBits returns a minimum size estimated by an *optimal*
+// compression of the block.
+// The estimate is in bits.
+func (t *tokens) EstimatedBits() int {
+	shannon := float32(0)
+	bits := int(0)
+	nMatches := 0
+	total := int(t.n) + t.nFilled
+	if total > 0 {
+		invTotal := 1.0 / float32(total)
+		for _, v := range t.litHist[:] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+			}
+		}
+		// Just add 15 for EOB
+		shannon += 15
+		for i, v := range t.extraHist[1 : literalCount-256] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+				bits += int(lengthExtraBits[i&31]) * int(v)
+				nMatches += int(v)
+			}
+		}
+	}
+	if nMatches > 0 {
+		invTotal := 1.0 / float32(nMatches)
+		for i, v := range t.offHist[:offsetCodeCount] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+				bits += int(offsetExtraBits[i&31]) * int(v)
+			}
+		}
+	}
+	return int(shannon) + bits
+}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+	if debugDeflate {
+		if xlength >= maxMatchLength+baseMatchLength {
+			panic(fmt.Errorf("invalid length: %v", xlength))
+		}
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+			panic(fmt.Errorf("invalid offset: %v", xoffset))
+		}
+	}
+	oCode := offsetCode(xoffset)
+	xoffset |= oCode << 16
+
+	t.extraHist[lengthCodes1[uint8(xlength)]]++
+	t.offHist[oCode&31]++
+	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+	t.n++
+}
+
+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
+// Length should NOT have the base subtracted, only offset should.
+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
+	if debugDeflate {
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+			panic(fmt.Errorf("invalid offset: %v", xoffset))
+		}
+	}
+	oc := offsetCode(xoffset)
+	xoffset |= oc << 16
+	for xlength > 0 {
+		xl := xlength
+		if xl > 258 {
+			// We need to have at least baseMatchLength left over for next loop.
+			if xl > 258+baseMatchLength {
+				xl = 258
+			} else {
+				xl = 258 - baseMatchLength
+			}
+		}
+		xlength -= xl
+		xl -= baseMatchLength
+		t.extraHist[lengthCodes1[uint8(xl)]]++
+		t.offHist[oc&31]++
+		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+		t.n++
+	}
+}
+
+func (t *tokens) AddEOB() {
+	t.tokens[t.n] = token(endBlockMarker)
+	t.extraHist[0]++
+	t.n++
+}
+
+func (t *tokens) Slice() []token {
+	return t.tokens[:t.n]
+}
+
+// VarInt returns the tokens as varint encoded bytes.
+func (t *tokens) VarInt() []byte {
+	var b = make([]byte, binary.MaxVarintLen32*int(t.n))
+	var off int
+	for _, v := range t.tokens[:t.n] {
+		off += binary.PutUvarint(b[off:], uint64(v))
+	}
+	return b[:off]
+}
+
+// FromVarInt restores t to the varint encoded tokens provided.
+// Any data in t is removed.
+func (t *tokens) FromVarInt(b []byte) error {
+	var buf = bytes.NewReader(b)
+	var toks []token
+	for {
+		r, err := binary.ReadUvarint(buf)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		toks = append(toks, token(r))
+	}
+	t.indexTokens(toks)
+	return nil
+}
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint8 { return uint8(t) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint8 { return uint8(t >> lengthShift) }
+
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+	if false {
+		if off < uint32(len(offsetCodes)) {
+			return offsetCodes[off&255]
+		} else if off>>7 < uint32(len(offsetCodes)) {
+			return offsetCodes[(off>>7)&255] + 14
+		} else {
+			return offsetCodes[(off>>14)&255] + 28
+		}
+	}
+	if off < uint32(len(offsetCodes)) {
+		return offsetCodes[uint8(off)]
+	}
+	return offsetCodes14[uint8(off>>7)]
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go
index 43e4636..e82fa3b 100644
--- a/vendor/github.com/klauspost/compress/fse/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -152,12 +152,11 @@
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
index 6f34191..074018d 100644
--- a/vendor/github.com/klauspost/compress/fse/compress.go
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -146,54 +146,51 @@
 		c1.encodeZero(tt[src[ip-2]])
 		ip -= 2
 	}
+	src = src[:ip]
 
 	// Main compression loop.
 	switch {
 	case !s.zeroBits && s.actualTableLog <= 8:
 		// We can encode 4 symbols without requiring a flush.
 		// We do not need to check if any output is 0 bits.
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encode(tt[v0])
 			c1.encode(tt[v1])
 			c2.encode(tt[v2])
 			c1.encode(tt[v3])
-			ip -= 4
 		}
 	case !s.zeroBits:
 		// We do not need to check if any output is 0 bits.
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encode(tt[v0])
 			c1.encode(tt[v1])
 			s.bw.flush32()
 			c2.encode(tt[v2])
 			c1.encode(tt[v3])
-			ip -= 4
 		}
 	case s.actualTableLog <= 8:
 		// We can encode 4 symbols without requiring a flush
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encodeZero(tt[v0])
 			c1.encodeZero(tt[v1])
 			c2.encodeZero(tt[v2])
 			c1.encodeZero(tt[v3])
-			ip -= 4
 		}
 	default:
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encodeZero(tt[v0])
 			c1.encodeZero(tt[v1])
 			s.bw.flush32()
 			c2.encodeZero(tt[v2])
 			c1.encodeZero(tt[v3])
-			ip -= 4
 		}
 	}
 
@@ -202,7 +199,8 @@
 	c2.flush(s.actualTableLog)
 	c1.flush(s.actualTableLog)
 
-	return s.bw.close()
+	s.bw.close()
+	return nil
 }
 
 // writeCount will write the normalized histogram count to header.
@@ -214,7 +212,7 @@
 		previous0 bool
 		charnum   uint16
 
-		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+		maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
 
 		// Write Table Size
 		bitStream = uint32(tableLog - minTablelog)
@@ -459,15 +457,17 @@
 	for _, v := range in {
 		s.count[v]++
 	}
-	m := uint32(0)
+	m, symlen := uint32(0), s.symbolLen
 	for i, v := range s.count[:] {
+		if v == 0 {
+			continue
+		}
 		if v > m {
 			m = v
 		}
-		if v > 0 {
-			s.symbolLen = uint16(i) + 1
-		}
+		symlen = uint16(i) + 1
 	}
+	s.symbolLen = symlen
 	return int(m)
 }
 
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
index 926f5f1..0c7dd4f 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -15,7 +15,7 @@
 // It is possible, but by no way guaranteed that corrupt data will
 // return an error.
 // It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
 func Decompress(b []byte, s *Scratch) ([]byte, error) {
 	s, err := s.prepare(b)
 	if err != nil {
@@ -260,7 +260,9 @@
 // If the buffer is over-read an error is returned.
 func (s *Scratch) decompress() error {
 	br := &s.bits
-	br.init(s.br.unread())
+	if err := br.init(s.br.unread()); err != nil {
+		return err
+	}
 
 	var s1, s2 decoder
 	// Initialize and decode first state and symbol.
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
new file mode 100644
index 0000000..00a0a2c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -0,0 +1,380 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+package gzip
+
+import (
+	"bufio"
+	"compress/gzip"
+	"encoding/binary"
+	"hash/crc32"
+	"io"
+	"time"
+
+	"github.com/klauspost/compress/flate"
+)
+
+const (
+	gzipID1     = 0x1f
+	gzipID2     = 0x8b
+	gzipDeflate = 8
+	flagText    = 1 << 0
+	flagHdrCrc  = 1 << 1
+	flagExtra   = 1 << 2
+	flagName    = 1 << 3
+	flagComment = 1 << 4
+)
+
+var (
+	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+	ErrChecksum = gzip.ErrChecksum
+	// ErrHeader is returned when reading GZIP data that has an invalid header.
+	ErrHeader = gzip.ErrHeader
+)
+
+var le = binary.LittleEndian
+
+// noEOF converts io.EOF to io.ErrUnexpectedEOF.
+func noEOF(err error) error {
+	if err == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	return err
+}
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+//
+// Strings must be UTF-8 encoded and may only contain Unicode code points
+// U+0001 through U+00FF, due to limitations of the GZIP file format.
+type Header struct {
+	Comment string    // comment
+	Extra   []byte    // "extra data"
+	ModTime time.Time // modification time
+	Name    string    // file name
+	OS      byte      // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return an ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum. Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+	Header       // valid after NewReader or Reader.Reset
+	r            flate.Reader
+	br           *bufio.Reader
+	decompressor io.ReadCloser
+	digest       uint32 // CRC-32, IEEE polynomial (section 8)
+	size         uint32 // Uncompressed size (section 2.3.1)
+	buf          [512]byte
+	err          error
+	multistream  bool
+}
+
+// NewReader creates a new Reader reading the given reader.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+//
+// It is the caller's responsibility to call Close on the Reader when done.
+//
+// The Reader.Header fields will be valid in the Reader returned.
+func NewReader(r io.Reader) (*Reader, error) {
+	z := new(Reader)
+	if err := z.Reset(r); err != nil {
+		return nil, err
+	}
+	return z, nil
+}
+
+// Reset discards the Reader z's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) error {
+	*z = Reader{
+		decompressor: z.decompressor,
+		multistream:  true,
+		br:           z.br,
+	}
+	if rr, ok := r.(flate.Reader); ok {
+		z.r = rr
+	} else {
+		// Reuse if we can.
+		if z.br != nil {
+			z.br.Reset(r)
+		} else {
+			z.br = bufio.NewReader(r)
+		}
+		z.r = z.br
+	}
+	z.Header, z.err = z.readHeader()
+	return z.err
+}
+
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. If the underlying reader implements io.ByteReader,
+// it will be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+	z.multistream = ok
+}
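
The pattern described in the comment, reading one member at a time by alternating Reset and Multistream(false), might look like the sketch below; the file name and handler are illustrative.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/gzip"
)

// readMembers handles each gzip member in r separately instead of reading the
// concatenation as one stream.
func readMembers(r io.Reader, handle func(hdr gzip.Header, data []byte) error) error {
	zr, err := gzip.NewReader(r)
	if err != nil {
		return err
	}
	defer zr.Close()
	for {
		zr.Multistream(false)
		data, err := io.ReadAll(zr)
		if err != nil {
			return err
		}
		if err := handle(zr.Header, data); err != nil {
			return err
		}
		// Start the next member; io.EOF means there is none.
		if err := zr.Reset(r); err == io.EOF {
			return nil
		} else if err != nil {
			return err
		}
	}
}

func main() {
	f, err := os.Open("members.gz") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	err = readMembers(f, func(hdr gzip.Header, data []byte) error {
		log.Printf("member %q: %d bytes", hdr.Name, len(data))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```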
+
+// readString reads a NUL-terminated string from z.r.
+// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
+// will output a string encoded using UTF-8.
+// This method always updates z.digest with the data read.
+func (z *Reader) readString() (string, error) {
+	var err error
+	needConv := false
+	for i := 0; ; i++ {
+		if i >= len(z.buf) {
+			return "", ErrHeader
+		}
+		z.buf[i], err = z.r.ReadByte()
+		if err != nil {
+			return "", err
+		}
+		if z.buf[i] > 0x7f {
+			needConv = true
+		}
+		if z.buf[i] == 0 {
+			// Digest covers the NUL terminator.
+			z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
+
+			// Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
+			if needConv {
+				s := make([]rune, 0, i)
+				for _, v := range z.buf[:i] {
+					s = append(s, rune(v))
+				}
+				return string(s), nil
+			}
+			return string(z.buf[:i]), nil
+		}
+	}
+}
+
+// readHeader reads the GZIP header according to section 2.3.1.
+// This method does not set z.err.
+func (z *Reader) readHeader() (hdr Header, err error) {
+	if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
+		// RFC 1952, section 2.2, says the following:
+		//	A gzip file consists of a series of "members" (compressed data sets).
+		//
+		// Other than this, the specification does not clarify whether a
+		// "series" is defined as "one or more" or "zero or more". To err on the
+		// side of caution, Go interprets this to mean "zero or more".
+		// Thus, it is okay to return io.EOF here.
+		return hdr, err
+	}
+	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
+		return hdr, ErrHeader
+	}
+	flg := z.buf[3]
+	hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
+	// z.buf[8] is XFL and is currently ignored.
+	hdr.OS = z.buf[9]
+	z.digest = crc32.ChecksumIEEE(z.buf[:10])
+
+	if flg&flagExtra != 0 {
+		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+			return hdr, noEOF(err)
+		}
+		z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
+		data := make([]byte, le.Uint16(z.buf[:2]))
+		if _, err = io.ReadFull(z.r, data); err != nil {
+			return hdr, noEOF(err)
+		}
+		z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
+		hdr.Extra = data
+	}
+
+	var s string
+	if flg&flagName != 0 {
+		if s, err = z.readString(); err != nil {
+			return hdr, err
+		}
+		hdr.Name = s
+	}
+
+	if flg&flagComment != 0 {
+		if s, err = z.readString(); err != nil {
+			return hdr, err
+		}
+		hdr.Comment = s
+	}
+
+	if flg&flagHdrCrc != 0 {
+		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+			return hdr, noEOF(err)
+		}
+		digest := le.Uint16(z.buf[:2])
+		if digest != uint16(z.digest) {
+			return hdr, ErrHeader
+		}
+	}
+
+	// Reserved FLG bits must be zero.
+	if flg>>5 != 0 {
+		return hdr, ErrHeader
+	}
+
+	z.digest = 0
+	if z.decompressor == nil {
+		z.decompressor = flate.NewReader(z.r)
+	} else {
+		z.decompressor.(flate.Resetter).Reset(z.r, nil)
+	}
+	return hdr, nil
+}
+
+// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
+func (z *Reader) Read(p []byte) (n int, err error) {
+	if z.err != nil {
+		return 0, z.err
+	}
+
+	for n == 0 {
+		n, z.err = z.decompressor.Read(p)
+		z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+		z.size += uint32(n)
+		if z.err != io.EOF {
+			// In the normal case we return here.
+			return n, z.err
+		}
+
+		// Finished file; check checksum and size.
+		if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+			z.err = noEOF(err)
+			return n, z.err
+		}
+		digest := le.Uint32(z.buf[:4])
+		size := le.Uint32(z.buf[4:8])
+		if digest != z.digest || size != z.size {
+			z.err = ErrChecksum
+			return n, z.err
+		}
+		z.digest, z.size = 0, 0
+
+		// File is ok; check if there is another.
+		if !z.multistream {
+			return n, io.EOF
+		}
+		z.err = nil // Remove io.EOF
+
+		if _, z.err = z.readHeader(); z.err != nil {
+			return n, z.err
+		}
+	}
+
+	return n, nil
+}
+
+type crcer interface {
+	io.Writer
+	Sum32() uint32
+	Reset()
+}
+type crcUpdater struct {
+	z *Reader
+}
+
+func (c *crcUpdater) Write(p []byte) (int, error) {
+	c.z.digest = crc32.Update(c.z.digest, crc32.IEEETable, p)
+	return len(p), nil
+}
+
+func (c *crcUpdater) Sum32() uint32 {
+	return c.z.digest
+}
+
+func (c *crcUpdater) Reset() {
+	c.z.digest = 0
+}
+
+// WriteTo supports the io.WriterTo interface for io.Copy and friends.
+func (z *Reader) WriteTo(w io.Writer) (int64, error) {
+	total := int64(0)
+	crcWriter := crcer(crc32.NewIEEE())
+	if z.digest != 0 {
+		crcWriter = &crcUpdater{z: z}
+	}
+	for {
+		if z.err != nil {
+			if z.err == io.EOF {
+				return total, nil
+			}
+			return total, z.err
+		}
+
+		// We write both to output and digest.
+		mw := io.MultiWriter(w, crcWriter)
+		n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
+		total += n
+		z.size += uint32(n)
+		if err != nil {
+			z.err = err
+			return total, z.err
+		}
+
+		// Finished file; check checksum + size.
+		if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+			if err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			z.err = err
+			return total, err
+		}
+		z.digest = crcWriter.Sum32()
+		digest := le.Uint32(z.buf[:4])
+		size := le.Uint32(z.buf[4:8])
+		if digest != z.digest || size != z.size {
+			z.err = ErrChecksum
+			return total, z.err
+		}
+		z.digest, z.size = 0, 0
+
+		// File is ok; check if there is another.
+		if !z.multistream {
+			return total, nil
+		}
+		crcWriter.Reset()
+		z.err = nil // Remove io.EOF
+
+		if _, z.err = z.readHeader(); z.err != nil {
+			if z.err == io.EOF {
+				return total, nil
+			}
+			return total, z.err
+		}
+	}
+}
+
+// Close closes the Reader. It does not close the underlying io.Reader.
+// In order for the GZIP checksum to be verified, the reader must be
+// fully consumed until the io.EOF.
+func (z *Reader) Close() error { return z.decompressor.Close() }
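
A basic decompression sketch tying the pieces together: the CRC-32 and size in the trailer are only checked once the stream has been read to its end, so the reader should be fully consumed before the output is trusted. Paths are illustrative.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/gzip"
)

func main() {
	in, err := os.Open("archive.gz") // illustrative input
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	zr, err := gzip.NewReader(in)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	out, err := os.Create("archive") // illustrative output
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// io.Copy picks up the Reader's WriteTo, which also drives the CRC-32
	// update; a corrupt trailer surfaces here as ErrChecksum.
	if _, err := io.Copy(out, zr); err != nil {
		log.Fatal(err)
	}
}
```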
diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go
new file mode 100644
index 0000000..5bc7205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gzip.go
@@ -0,0 +1,290 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gzip
+
+import (
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// These constants are copied from the flate package, so that code that imports
+// "compress/gzip" does not also have to import "compress/flate".
+const (
+	NoCompression       = flate.NoCompression
+	BestSpeed           = flate.BestSpeed
+	BestCompression     = flate.BestCompression
+	DefaultCompression  = flate.DefaultCompression
+	ConstantCompression = flate.ConstantCompression
+	HuffmanOnly         = flate.HuffmanOnly
+
+	// StatelessCompression will do compression but without maintaining any state
+	// between Write calls.
+	// There will be no memory kept between Write calls,
+	// but compression and speed will be suboptimal.
+	// Because of this, the size of actual Write calls will affect output size.
+	StatelessCompression = -3
+)
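
Selecting the StatelessCompression level above routes every Write through flate.StatelessDeflate instead of a retained compressor. A small sketch (the output path is illustrative):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/gzip"
)

func main() {
	out, err := os.Create("stream.gz") // illustrative output path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// StatelessCompression keeps no compressor state between Write calls.
	zw, err := gzip.NewWriterLevel(out, gzip.StatelessCompression)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(zw, os.Stdin); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```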
+
+// A Writer is an io.WriteCloser.
+// Writes to a Writer are compressed and written to w.
+type Writer struct {
+	Header      // written at first call to Write, Flush, or Close
+	w           io.Writer
+	level       int
+	err         error
+	compressor  *flate.Writer
+	digest      uint32 // CRC-32, IEEE polynomial (section 8)
+	size        uint32 // Uncompressed size (section 2.3.1)
+	wroteHeader bool
+	closed      bool
+	buf         [10]byte
+}
+
+// NewWriter returns a new Writer.
+// Writes to the returned writer are compressed and written to w.
+//
+// It is the caller's responsibility to call Close on the WriteCloser when done.
+// Writes may be buffered and not flushed until Close.
+//
+// Callers that wish to set the fields in Writer.Header must do so before
+// the first call to Write, Flush, or Close.
+func NewWriter(w io.Writer) *Writer {
+	z, _ := NewWriterLevel(w, DefaultCompression)
+	return z
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming DefaultCompression.
+//
+// The compression level can be DefaultCompression, NoCompression, or any
+// integer value between BestSpeed and BestCompression inclusive. The error
+// returned will be nil if the level is valid.
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
+	if level < StatelessCompression || level > BestCompression {
+		return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
+	}
+	z := new(Writer)
+	z.init(w, level)
+	return z, nil
+}
+
+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = flate.MinCustomWindowSize
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = flate.MaxCustomWindowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+	if windowSize < MinCustomWindowSize {
+		return nil, errors.New("gzip: requested window size less than MinWindowSize")
+	}
+	if windowSize > MaxCustomWindowSize {
+		return nil, errors.New("gzip: requested window size bigger than MaxCustomWindowSize")
+	}
+
+	z := new(Writer)
+	z.init(w, -windowSize)
+	return z, nil
+}
+
+func (z *Writer) init(w io.Writer, level int) {
+	compressor := z.compressor
+	if level != StatelessCompression {
+		if compressor != nil {
+			compressor.Reset(w)
+		}
+	}
+
+	*z = Writer{
+		Header: Header{
+			OS: 255, // unknown
+		},
+		w:          w,
+		level:      level,
+		compressor: compressor,
+	}
+}
+
+// Reset discards the Writer z's state and makes it equivalent to the
+// result of its original state from NewWriter or NewWriterLevel, but
+// writing to w instead. This permits reusing a Writer rather than
+// allocating a new one.
+func (z *Writer) Reset(w io.Writer) {
+	z.init(w, z.level)
+}
+
+// writeBytes writes a length-prefixed byte slice to z.w.
+func (z *Writer) writeBytes(b []byte) error {
+	if len(b) > 0xffff {
+		return errors.New("gzip.Write: Extra data is too large")
+	}
+	le.PutUint16(z.buf[:2], uint16(len(b)))
+	_, err := z.w.Write(z.buf[:2])
+	if err != nil {
+		return err
+	}
+	_, err = z.w.Write(b)
+	return err
+}
+
+// writeString writes a UTF-8 string s in GZIP's format to z.w.
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+func (z *Writer) writeString(s string) (err error) {
+	// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
+	needconv := false
+	for _, v := range s {
+		if v == 0 || v > 0xff {
+			return errors.New("gzip.Write: non-Latin-1 header string")
+		}
+		if v > 0x7f {
+			needconv = true
+		}
+	}
+	if needconv {
+		b := make([]byte, 0, len(s))
+		for _, v := range s {
+			b = append(b, byte(v))
+		}
+		_, err = z.w.Write(b)
+	} else {
+		_, err = io.WriteString(z.w, s)
+	}
+	if err != nil {
+		return err
+	}
+	// GZIP strings are NUL-terminated.
+	z.buf[0] = 0
+	_, err = z.w.Write(z.buf[:1])
+	return err
+}
+
+// Write writes a compressed form of p to the underlying io.Writer. The
+// compressed bytes are not necessarily flushed until the Writer is closed.
+func (z *Writer) Write(p []byte) (int, error) {
+	if z.err != nil {
+		return 0, z.err
+	}
+	var n int
+	// Write the GZIP header lazily.
+	if !z.wroteHeader {
+		z.wroteHeader = true
+		z.buf[0] = gzipID1
+		z.buf[1] = gzipID2
+		z.buf[2] = gzipDeflate
+		z.buf[3] = 0
+		if z.Extra != nil {
+			z.buf[3] |= 0x04
+		}
+		if z.Name != "" {
+			z.buf[3] |= 0x08
+		}
+		if z.Comment != "" {
+			z.buf[3] |= 0x10
+		}
+		le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
+		if z.level == BestCompression {
+			z.buf[8] = 2
+		} else if z.level == BestSpeed {
+			z.buf[8] = 4
+		} else {
+			z.buf[8] = 0
+		}
+		z.buf[9] = z.OS
+		n, z.err = z.w.Write(z.buf[:10])
+		if z.err != nil {
+			return n, z.err
+		}
+		if z.Extra != nil {
+			z.err = z.writeBytes(z.Extra)
+			if z.err != nil {
+				return n, z.err
+			}
+		}
+		if z.Name != "" {
+			z.err = z.writeString(z.Name)
+			if z.err != nil {
+				return n, z.err
+			}
+		}
+		if z.Comment != "" {
+			z.err = z.writeString(z.Comment)
+			if z.err != nil {
+				return n, z.err
+			}
+		}
+
+		if z.compressor == nil && z.level != StatelessCompression {
+			z.compressor, _ = flate.NewWriter(z.w, z.level)
+		}
+	}
+	z.size += uint32(len(p))
+	z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
+	if z.level == StatelessCompression {
+		return len(p), flate.StatelessDeflate(z.w, p, false, nil)
+	}
+	n, z.err = z.compressor.Write(p)
+	return n, z.err
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+//
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet. Flush does
+// not return until the data has been written. If the underlying
+// writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (z *Writer) Flush() error {
+	if z.err != nil {
+		return z.err
+	}
+	if z.closed || z.level == StatelessCompression {
+		return nil
+	}
+	if !z.wroteHeader {
+		z.Write(nil)
+		if z.err != nil {
+			return z.err
+		}
+	}
+	z.err = z.compressor.Flush()
+	return z.err
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying
+// io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+	if z.err != nil {
+		return z.err
+	}
+	if z.closed {
+		return nil
+	}
+	z.closed = true
+	if !z.wroteHeader {
+		z.Write(nil)
+		if z.err != nil {
+			return z.err
+		}
+	}
+	if z.level == StatelessCompression {
+		z.err = flate.StatelessDeflate(z.w, nil, true, nil)
+	} else {
+		z.err = z.compressor.Close()
+	}
+	if z.err != nil {
+		return z.err
+	}
+	le.PutUint32(z.buf[:4], z.digest)
+	le.PutUint32(z.buf[4:8], z.size)
+	_, z.err = z.w.Write(z.buf[:8])
+	return z.err
+}
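
Since the header is written lazily on the first Write, Flush, or Close, any Header fields have to be populated before that point. A short sketch with made-up metadata:

```go
package main

import (
	"bytes"
	"log"
	"time"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)

	// Header fields are emitted with the lazily written header, so set them
	// before the first Write/Flush/Close. Values here are illustrative.
	zw.Name = "notes.txt"
	zw.Comment = "example archive"
	zw.ModTime = time.Unix(1700000000, 0)

	if _, err := zw.Write([]byte("hello gzip header\n")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil { // appends the CRC-32 and size trailer
		log.Fatal(err)
	}
	log.Printf("wrote %d compressed bytes", buf.Len())
}
```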
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 504a7be..bfc7a52 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -6,10 +6,11 @@
 package huff0
 
 import (
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // bitReader reads a bitstream in reverse.
@@ -46,7 +47,7 @@
 	return nil
 }
 
-// peekBitsFast requires that at least one bit is requested every time.
+// peekByteFast requires that at least one byte is requested every time.
 // There are no checks if the buffer is filled.
 func (b *bitReaderBytes) peekByteFast() uint8 {
 	got := uint8(b.value >> 56)
@@ -66,9 +67,7 @@
 	}
 
 	// 2 bounds checks.
-	v := b.in[b.off-4 : b.off]
-	v = v[:4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	low := le.Load32(b.in, b.off-4)
 	b.value |= uint64(low) << (b.bitsRead - 32)
 	b.bitsRead -= 32
 	b.off -= 4
@@ -77,7 +76,7 @@
 // fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
 func (b *bitReaderBytes) fillFastStart() {
 	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.value = le.Load64(b.in, b.off-8)
 	b.bitsRead = 0
 	b.off -= 8
 }
@@ -87,10 +86,8 @@
 	if b.bitsRead < 32 {
 		return
 	}
-	if b.off > 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	if b.off >= 4 {
+		low := le.Load32(b.in, b.off-4)
 		b.value |= uint64(low) << (b.bitsRead - 32)
 		b.bitsRead -= 32
 		b.off -= 4
@@ -177,10 +174,7 @@
 		return
 	}
 
-	// 2 bounds checks.
-	v := b.in[b.off-4 : b.off]
-	v = v[:4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	low := le.Load32(b.in, b.off-4)
 	b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
 	b.bitsRead -= 32
 	b.off -= 4
@@ -188,8 +182,7 @@
 
 // fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
 func (b *bitReaderShifted) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.value = le.Load64(b.in, b.off-8)
 	b.bitsRead = 0
 	b.off -= 8
 }
@@ -200,9 +193,7 @@
 		return
 	}
 	if b.off > 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		low := le.Load32(b.in, b.off-4)
 		b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
 		b.bitsRead -= 32
 		b.off -= 4
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index ec71f7a..0ebc9aa 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -13,14 +13,6 @@
 	out          []byte
 }
 
-// bitMask16 is bitmasks. Has extra to avoid bounds check.
-var bitMask16 = [32]uint16{
-	0, 1, 3, 7, 0xF, 0x1F,
-	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
-	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF} /* up to 16 bits */
-
 // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -60,6 +52,22 @@
 	b.nBits += encA.nBits + encB.nBits
 }
 
+// encFourSymbols adds up to 32 bits from four symbols.
+// It will not check if there is space for them,
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+	bitsA := encA.nBits
+	bitsB := bitsA + encB.nBits
+	bitsC := bitsB + encC.nBits
+	bitsD := bitsC + encD.nBits
+	combined := uint64(encA.val) |
+		(uint64(encB.val) << (bitsA & 63)) |
+		(uint64(encC.val) << (bitsB & 63)) |
+		(uint64(encD.val) << (bitsC & 63))
+	b.bitContainer |= combined << (b.nBits & 63)
+	b.nBits += bitsD
+}
+
 // flush32 will flush out, so there are at least 32 bits available for writing.
 func (b *bitWriter) flush32() {
 	if b.nBits < 32 {
@@ -86,10 +94,9 @@
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
deleted file mode 100644
index 4dcab8d..0000000
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-// byteReader provides a byte reader that reads
-// little endian values from a byte stream.
-// The input stream is manually advanced.
-// The reader performs no bounds checks.
-type byteReader struct {
-	b   []byte
-	off int
-}
-
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
-	b.b = in
-	b.off = 0
-}
-
-// Int32 returns a little endian int32 starting at current offset.
-func (b byteReader) Int32() int32 {
-	v3 := int32(b.b[b.off+3])
-	v2 := int32(b.b[b.off+2])
-	v1 := int32(b.b[b.off+1])
-	v0 := int32(b.b[b.off])
-	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// Uint32 returns a little endian uint32 starting at current offset.
-func (b byteReader) Uint32() uint32 {
-	v3 := uint32(b.b[b.off+3])
-	v2 := uint32(b.b[b.off+2])
-	v1 := uint32(b.b[b.off+1])
-	v0 := uint32(b.b[b.off])
-	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// remain will return the number of bytes remaining.
-func (b byteReader) remain() int {
-	return len(b.b) - b.off
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 4d14542..84aa3d1 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -227,10 +227,10 @@
 }
 
 func (s *Scratch) compress1X(src []byte) ([]byte, error) {
-	return s.compress1xDo(s.Out, src)
+	return s.compress1xDo(s.Out, src), nil
 }
 
-func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
 	var bw = bitWriter{out: dst}
 
 	// N is length divisible by 4.
@@ -248,8 +248,7 @@
 			tmp := src[n : n+4]
 			// tmp should be len 4
 			bw.flush32()
-			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
-			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+			bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
 		}
 	} else {
 		for ; n >= 0; n -= 4 {
@@ -261,8 +260,8 @@
 			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
 		}
 	}
-	err := bw.close()
-	return bw.out, err
+	bw.close()
+	return bw.out
 }
 
 var sixZeros [6]byte
@@ -284,12 +283,8 @@
 		}
 		src = src[len(toDo):]
 
-		var err error
 		idx := len(s.Out)
-		s.Out, err = s.compress1xDo(s.Out, toDo)
-		if err != nil {
-			return nil, err
-		}
+		s.Out = s.compress1xDo(s.Out, toDo)
 		if len(s.Out)-idx > math.MaxUint16 {
 			// We cannot store the size in the jump table
 			return nil, ErrIncompressible
@@ -316,7 +311,6 @@
 
 	segmentSize := (len(src) + 3) / 4
 	var wg sync.WaitGroup
-	var errs [4]error
 	wg.Add(4)
 	for i := 0; i < 4; i++ {
 		toDo := src
@@ -327,15 +321,12 @@
 
 		// Separate goroutine for each block.
 		go func(i int) {
-			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
 			wg.Done()
 		}(i)
 	}
 	wg.Wait()
 	for i := 0; i < 4; i++ {
-		if errs[i] != nil {
-			return nil, errs[i]
-		}
 		o := s.tmpOut[i]
 		if len(o) > math.MaxUint16 {
 			// We cannot store the size in the jump table
@@ -359,35 +350,36 @@
 // Does not update s.clearCount.
 func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
 	reuse = true
+	_ = s.count // Assert that s != nil to speed up the following loop.
 	for _, v := range in {
 		s.count[v]++
 	}
 	m := uint32(0)
 	if len(s.prevTable) > 0 {
 		for i, v := range s.count[:] {
+			if v == 0 {
+				continue
+			}
 			if v > m {
 				m = v
 			}
-			if v > 0 {
-				s.symbolLen = uint16(i) + 1
-				if i >= len(s.prevTable) {
-					reuse = false
-				} else {
-					if s.prevTable[i].nBits == 0 {
-						reuse = false
-					}
-				}
+			s.symbolLen = uint16(i) + 1
+			if i >= len(s.prevTable) {
+				reuse = false
+			} else if s.prevTable[i].nBits == 0 {
+				reuse = false
 			}
 		}
 		return int(m), reuse
 	}
 	for i, v := range s.count[:] {
+		if v == 0 {
+			continue
+		}
 		if v > m {
 			m = v
 		}
-		if v > 0 {
-			s.symbolLen = uint16(i) + 1
-		}
+		s.symbolLen = uint16(i) + 1
 	}
 	return int(m), false
 }
@@ -424,7 +416,7 @@
 
 // minTableLog provides the minimum logSize to safely represent a distribution.
 func (s *Scratch) minTableLog() uint8 {
-	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+	minBitsSrc := highBit32(uint32(s.srcLen)) + 1
 	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
 	if minBitsSrc < minBitsSymbols {
 		return uint8(minBitsSrc)
@@ -436,7 +428,7 @@
 func (s *Scratch) optimalTableLog() {
 	tableLog := s.TableLog
 	minBits := s.minTableLog()
-	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+	maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
 	if maxBitsSrc < tableLog {
 		// Accuracy can be reduced
 		tableLog = maxBitsSrc
@@ -484,34 +476,35 @@
 	// Different from reference implementation.
 	huffNode0 := s.nodes[0 : huffNodesLen+1]
 
-	for huffNode[nonNullRank].count == 0 {
+	for huffNode[nonNullRank].count() == 0 {
 		nonNullRank--
 	}
 
 	lowS := int16(nonNullRank)
 	nodeRoot := nodeNb + lowS - 1
 	lowN := nodeNb
-	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
-	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+	huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+	huffNode[lowS].setParent(nodeNb)
+	huffNode[lowS-1].setParent(nodeNb)
 	nodeNb++
 	lowS -= 2
 	for n := nodeNb; n <= nodeRoot; n++ {
-		huffNode[n].count = 1 << 30
+		huffNode[n].setCount(1 << 30)
 	}
 	// fake entry, strong barrier
-	huffNode0[0].count = 1 << 31
+	huffNode0[0].setCount(1 << 31)
 
 	// create parents
 	for nodeNb <= nodeRoot {
 		var n1, n2 int16
-		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
 			n1 = lowS
 			lowS--
 		} else {
 			n1 = lowN
 			lowN++
 		}
-		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
 			n2 = lowS
 			lowS--
 		} else {
@@ -519,18 +512,19 @@
 			lowN++
 		}
 
-		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
-		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+		huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+		huffNode0[n1+1].setParent(nodeNb)
+		huffNode0[n2+1].setParent(nodeNb)
 		nodeNb++
 	}
 
 	// distribute weights (unlimited tree height)
-	huffNode[nodeRoot].nbBits = 0
+	huffNode[nodeRoot].setNbBits(0)
 	for n := nodeRoot - 1; n >= startNode; n-- {
-		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
 	}
 	for n := uint16(0); n <= nonNullRank; n++ {
-		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
 	}
 	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
 	maxNbBits := s.actualTableLog
@@ -542,7 +536,7 @@
 	var nbPerRank [tableLogMax + 1]uint16
 	var valPerRank [16]uint16
 	for _, v := range huffNode[:nonNullRank+1] {
-		nbPerRank[v.nbBits]++
+		nbPerRank[v.nbBits()]++
 	}
 	// determine starting value per rank
 	{
@@ -557,7 +551,7 @@
 
 	// push nbBits per symbol, symbol order
 	for _, v := range huffNode[:nonNullRank+1] {
-		s.cTable[v.symbol].nBits = v.nbBits
+		s.cTable[v.symbol()].nBits = v.nbBits()
 	}
 
 	// assign value within rank, symbol order
@@ -603,12 +597,12 @@
 		pos := rank[r].current
 		rank[r].current++
 		prev := nodes[(pos-1)&huffNodesMask]
-		for pos > rank[r].base && c > prev.count {
+		for pos > rank[r].base && c > prev.count() {
 			nodes[pos&huffNodesMask] = prev
 			pos--
 			prev = nodes[(pos-1)&huffNodesMask]
 		}
-		nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
+		nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
 	}
 }
 
@@ -617,7 +611,7 @@
 	huffNode := s.nodes[1 : huffNodesLen+1]
 	//huffNode = huffNode[: huffNodesLen]
 
-	largestBits := huffNode[lastNonNull].nbBits
+	largestBits := huffNode[lastNonNull].nbBits()
 
 	// early exit : no elt > maxNbBits
 	if largestBits <= maxNbBits {
@@ -627,14 +621,14 @@
 	baseCost := int(1) << (largestBits - maxNbBits)
 	n := uint32(lastNonNull)
 
-	for huffNode[n].nbBits > maxNbBits {
-		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
-		huffNode[n].nbBits = maxNbBits
+	for huffNode[n].nbBits() > maxNbBits {
+		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
+		huffNode[n].setNbBits(maxNbBits)
 		n--
 	}
 	// n stops at huffNode[n].nbBits <= maxNbBits
 
-	for huffNode[n].nbBits == maxNbBits {
+	for huffNode[n].nbBits() == maxNbBits {
 		n--
 	}
 	// n ends at index of smallest symbol using < maxNbBits
@@ -655,10 +649,10 @@
 		{
 			currentNbBits := maxNbBits
 			for pos := int(n); pos >= 0; pos-- {
-				if huffNode[pos].nbBits >= currentNbBits {
+				if huffNode[pos].nbBits() >= currentNbBits {
 					continue
 				}
-				currentNbBits = huffNode[pos].nbBits // < maxNbBits
+				currentNbBits = huffNode[pos].nbBits() // < maxNbBits
 				rankLast[maxNbBits-currentNbBits] = uint32(pos)
 			}
 		}
@@ -675,8 +669,8 @@
 				if lowPos == noSymbol {
 					break
 				}
-				highTotal := huffNode[highPos].count
-				lowTotal := 2 * huffNode[lowPos].count
+				highTotal := huffNode[highPos].count()
+				lowTotal := 2 * huffNode[lowPos].count()
 				if highTotal <= lowTotal {
 					break
 				}
@@ -692,13 +686,14 @@
 				// this rank is no longer empty
 				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
 			}
-			huffNode[rankLast[nBitsToDecrease]].nbBits++
+			huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
+				huffNode[rankLast[nBitsToDecrease]].nbBits())
 			if rankLast[nBitsToDecrease] == 0 {
 				/* special case, reached largest symbol */
 				rankLast[nBitsToDecrease] = noSymbol
 			} else {
 				rankLast[nBitsToDecrease]--
-				if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
+				if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
 					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
 				}
 			}
@@ -706,15 +701,15 @@
 
 		for totalCost < 0 { /* Sometimes, cost correction overshoot */
 			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
-				for huffNode[n].nbBits == maxNbBits {
+				for huffNode[n].nbBits() == maxNbBits {
 					n--
 				}
-				huffNode[n+1].nbBits--
+				huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
 				rankLast[1] = n + 1
 				totalCost++
 				continue
 			}
-			huffNode[rankLast[1]+1].nbBits--
+			huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
 			rankLast[1]++
 			totalCost++
 		}
@@ -722,9 +717,26 @@
 	return maxNbBits
 }
 
-type nodeElt struct {
-	count  uint32
-	parent uint16
-	symbol byte
-	nbBits uint8
+// A nodeElt is the fields
+//
+//	count  uint32
+//	parent uint16
+//	symbol byte
+//	nbBits uint8
+//
+// in some order, all squashed into an integer so that the compiler
+// always loads and stores entire nodeElts instead of separate fields.
+type nodeElt uint64
+
+func makeNodeElt(count uint32, symbol byte) nodeElt {
+	return nodeElt(count) | nodeElt(symbol)<<48
 }
+
+func (e *nodeElt) count() uint32  { return uint32(*e) }
+func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
+func (e *nodeElt) symbol() byte   { return byte(*e >> 48) }
+func (e *nodeElt) nbBits() uint8  { return uint8(*e >> 56) }
+
+func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
+func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
+func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
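For reference, the packed layout keeps count in bits 0-31, parent in bits 32-47, symbol in bits 48-55 and nbBits in bits 56-63. A minimal round-trip sketch (hypothetical names, not part of the patch) showing that every field is recoverable from the single 64-bit word:

package main

import "fmt"

// packedNode mirrors the nodeElt layout assumed above:
// bits 0-31 count, 32-47 parent, 48-55 symbol, 56-63 nbBits.
type packedNode uint64

func makePackedNode(count uint32, symbol byte) packedNode {
    return packedNode(count) | packedNode(symbol)<<48
}

func (e *packedNode) setNbBits(n uint8) {
    *e = *e&0x00ffffffffffffff | packedNode(n)<<56
}

func main() {
    n := makePackedNode(1234, 'A')
    n.setNbBits(11)
    // Each field comes back out of the same 64-bit word.
    fmt.Println(uint32(n), byte(n>>48), uint8(n>>56)) // 1234 65 11
}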
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index c0c48bd..0f56b02 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -61,7 +61,7 @@
 		b, err := fse.Decompress(in[:iSize], s.fse)
 		s.fse.Out = nil
 		if err != nil {
-			return s, nil, err
+			return s, nil, fmt.Errorf("fse decompress returned: %w", err)
 		}
 		if len(b) > 255 {
 			return s, nil, errors.New("corrupt input: output table too large")
@@ -253,7 +253,7 @@
 
 	switch d.actualTableLog {
 	case 8:
-		const shift = 8 - 8
+		const shift = 0
 		for br.off >= 4 {
 			br.fillFast()
 			v := dt[uint8(br.value>>(56+shift))]
@@ -763,17 +763,20 @@
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
@@ -997,17 +1000,22 @@
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			// copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
@@ -1128,7 +1136,7 @@
 			errs++
 		}
 		if errs > 0 {
-			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
 			continue
 		}
 		// Ensure that all combinations are covered.
@@ -1144,7 +1152,7 @@
 				errs++
 			}
 			if errs > 20 {
-				fmt.Fprintf(w, "%d errros, stopping\n", errs)
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
 				break
 			}
 		}
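Replacing the copy calls with `*(*[bufoff]byte)(out) = buf[0]` relies on Go's slice-to-array-pointer conversion (Go 1.17+), which panics when the slice is shorter than the array; that is why the stream-overrun check is hoisted above the stores in these hunks. A small sketch of the idiom with a hypothetical 4-byte chunk size, not taken from the library:

package main

import "fmt"

// copyChunk stores a fixed-size chunk through a slice-to-array-pointer
// conversion; it panics if len(dst) < 4, so callers must check the length
// first, just as the patch now checks before the stores.
func copyChunk(dst []byte, src [4]byte) {
    *(*[4]byte)(dst) = src
}

func main() {
    out := make([]byte, 8)
    copyChunk(out, [4]byte{1, 2, 3, 4})
    fmt.Println(out) // [1 2 3 4 0 0 0 0]
}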
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index 9f3e9f7..ba7e8e6 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -14,12 +14,14 @@
 
 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog > 8.
+//
 //go:noescape
 func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 
 // decompress4x_8b_loop_x86 is an x86 assembler implementation
 // of Decompress4X when tablelog <= 8 which decodes 4 entries
 // per loop.
+//
 //go:noescape
 func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
 
@@ -145,11 +147,13 @@
 
 // decompress4x_main_loop_x86 is an x86 assembler implementation
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 func decompress1x_main_loop_amd64(ctx *decompress1xContext)
 
 // decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
 
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
index dd1a5ae..c4c7ab2 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -1,364 +1,352 @@
 // Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
 
 //go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
 
 // func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 TEXT ·decompress4x_main_loop_amd64(SB), $0-8
-	XORQ DX, DX
-
 	// Preload values
 	MOVQ    ctx+0(FP), AX
 	MOVBQZX 8(AX), DI
-	MOVQ    16(AX), SI
-	MOVQ    48(AX), BX
-	MOVQ    24(AX), R9
-	MOVQ    32(AX), R10
-	MOVQ    (AX), R11
+	MOVQ    16(AX), BX
+	MOVQ    48(AX), SI
+	MOVQ    24(AX), R8
+	MOVQ    32(AX), R9
+	MOVQ    (AX), R10
 
 	// Main loop
 main_loop:
-	MOVQ  SI, R8
-	CMPQ  R8, BX
+	XORL  DX, DX
+	CMPQ  BX, SI
 	SETGE DL
 
 	// br0.fillFast32()
-	MOVQ    32(R11), R12
-	MOVBQZX 40(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    32(R10), R11
+	MOVBQZX 40(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill0
-	MOVQ    24(R11), AX
-	SUBQ    $0x20, R13
+	MOVQ    24(R10), AX
+	SUBQ    $0x20, R12
 	SUBQ    $0x04, AX
-	MOVQ    (R11), R14
+	MOVQ    (R10), R13
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (AX)(R14*1), R14
-	MOVQ R13, CX
-	SHLQ CL, R14
-	MOVQ AX, 24(R11)
-	ORQ  R14, R12
+	MOVL (AX)(R13*1), R13
+	MOVQ R12, CX
+	SHLQ CL, R13
+	MOVQ AX, 24(R10)
+	ORQ  R13, R11
 
-	// exhausted = exhausted || (br0.off < 4)
-	CMPQ  AX, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br0.off < 4)
+	CMPQ AX, $0x04
+	ADCB $+0, DL
 
 skip_fill0:
 	// val0 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br0.peekTopBits(peekBits)
 	MOVQ DI, CX
-	MOVQ R12, R14
-	SHRQ CL, R14
+	MOVQ R11, R13
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v1.entry))
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// these two writes get coalesced
 	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
-	MOVW AX, (R8)
+	MOVW AX, (BX)
 
 	// update the bitreader structure
-	MOVQ R12, 32(R11)
-	MOVB R13, 40(R11)
-	ADDQ R9, R8
+	MOVQ R11, 32(R10)
+	MOVB R12, 40(R10)
 
 	// br1.fillFast32()
-	MOVQ    80(R11), R12
-	MOVBQZX 88(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    80(R10), R11
+	MOVBQZX 88(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill1
-	MOVQ    72(R11), AX
-	SUBQ    $0x20, R13
+	MOVQ    72(R10), AX
+	SUBQ    $0x20, R12
 	SUBQ    $0x04, AX
-	MOVQ    48(R11), R14
+	MOVQ    48(R10), R13
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (AX)(R14*1), R14
-	MOVQ R13, CX
-	SHLQ CL, R14
-	MOVQ AX, 72(R11)
-	ORQ  R14, R12
+	MOVL (AX)(R13*1), R13
+	MOVQ R12, CX
+	SHLQ CL, R13
+	MOVQ AX, 72(R10)
+	ORQ  R13, R11
 
-	// exhausted = exhausted || (br1.off < 4)
-	CMPQ  AX, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br1.off < 4)
+	CMPQ AX, $0x04
+	ADCB $+0, DL
 
 skip_fill1:
 	// val0 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br1.peekTopBits(peekBits)
 	MOVQ DI, CX
-	MOVQ R12, R14
-	SHRQ CL, R14
+	MOVQ R11, R13
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v1.entry))
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// these two writes get coalesced
 	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
-	MOVW AX, (R8)
+	MOVW AX, (BX)(R8*1)
 
 	// update the bitreader structure
-	MOVQ R12, 80(R11)
-	MOVB R13, 88(R11)
-	ADDQ R9, R8
+	MOVQ R11, 80(R10)
+	MOVB R12, 88(R10)
 
 	// br2.fillFast32()
-	MOVQ    128(R11), R12
-	MOVBQZX 136(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    128(R10), R11
+	MOVBQZX 136(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill2
-	MOVQ    120(R11), AX
-	SUBQ    $0x20, R13
+	MOVQ    120(R10), AX
+	SUBQ    $0x20, R12
 	SUBQ    $0x04, AX
-	MOVQ    96(R11), R14
+	MOVQ    96(R10), R13
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (AX)(R14*1), R14
-	MOVQ R13, CX
-	SHLQ CL, R14
-	MOVQ AX, 120(R11)
-	ORQ  R14, R12
+	MOVL (AX)(R13*1), R13
+	MOVQ R12, CX
+	SHLQ CL, R13
+	MOVQ AX, 120(R10)
+	ORQ  R13, R11
 
-	// exhausted = exhausted || (br2.off < 4)
-	CMPQ  AX, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br2.off < 4)
+	CMPQ AX, $0x04
+	ADCB $+0, DL
 
 skip_fill2:
 	// val0 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br2.peekTopBits(peekBits)
 	MOVQ DI, CX
-	MOVQ R12, R14
-	SHRQ CL, R14
+	MOVQ R11, R13
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v1.entry))
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// these two writes get coalesced
 	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
-	MOVW AX, (R8)
+	MOVW AX, (BX)(R8*2)
 
 	// update the bitreader structure
-	MOVQ R12, 128(R11)
-	MOVB R13, 136(R11)
-	ADDQ R9, R8
+	MOVQ R11, 128(R10)
+	MOVB R12, 136(R10)
 
 	// br3.fillFast32()
-	MOVQ    176(R11), R12
-	MOVBQZX 184(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    176(R10), R11
+	MOVBQZX 184(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill3
-	MOVQ    168(R11), AX
-	SUBQ    $0x20, R13
+	MOVQ    168(R10), AX
+	SUBQ    $0x20, R12
 	SUBQ    $0x04, AX
-	MOVQ    144(R11), R14
+	MOVQ    144(R10), R13
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (AX)(R14*1), R14
-	MOVQ R13, CX
-	SHLQ CL, R14
-	MOVQ AX, 168(R11)
-	ORQ  R14, R12
+	MOVL (AX)(R13*1), R13
+	MOVQ R12, CX
+	SHLQ CL, R13
+	MOVQ AX, 168(R10)
+	ORQ  R13, R11
 
-	// exhausted = exhausted || (br3.off < 4)
-	CMPQ  AX, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br3.off < 4)
+	CMPQ AX, $0x04
+	ADCB $+0, DL
 
 skip_fill3:
 	// val0 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br3.peekTopBits(peekBits)
 	MOVQ DI, CX
-	MOVQ R12, R14
-	SHRQ CL, R14
+	MOVQ R11, R13
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v1.entry))
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// these two writes get coalesced
 	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
-	MOVW AX, (R8)
+	LEAQ (R8)(R8*2), CX
+	MOVW AX, (BX)(CX*1)
 
 	// update the bitreader structure
-	MOVQ  R12, 176(R11)
-	MOVB  R13, 184(R11)
-	ADDQ  $0x02, SI
+	MOVQ  R11, 176(R10)
+	MOVB  R12, 184(R10)
+	ADDQ  $0x02, BX
 	TESTB DL, DL
 	JZ    main_loop
 	MOVQ  ctx+0(FP), AX
-	SUBQ  16(AX), SI
-	SHLQ  $0x02, SI
-	MOVQ  SI, 40(AX)
+	SUBQ  16(AX), BX
+	SHLQ  $0x02, BX
+	MOVQ  BX, 40(AX)
 	RET
 
 // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
 TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
-	XORQ DX, DX
-
 	// Preload values
 	MOVQ    ctx+0(FP), CX
 	MOVBQZX 8(CX), DI
 	MOVQ    16(CX), BX
 	MOVQ    48(CX), SI
-	MOVQ    24(CX), R9
-	MOVQ    32(CX), R10
-	MOVQ    (CX), R11
+	MOVQ    24(CX), R8
+	MOVQ    32(CX), R9
+	MOVQ    (CX), R10
 
 	// Main loop
 main_loop:
-	MOVQ  BX, R8
-	CMPQ  R8, SI
+	XORL  DX, DX
+	CMPQ  BX, SI
 	SETGE DL
 
 	// br0.fillFast32()
-	MOVQ    32(R11), R12
-	MOVBQZX 40(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    32(R10), R11
+	MOVBQZX 40(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill0
-	MOVQ    24(R11), R14
-	SUBQ    $0x20, R13
-	SUBQ    $0x04, R14
-	MOVQ    (R11), R15
+	MOVQ    24(R10), R13
+	SUBQ    $0x20, R12
+	SUBQ    $0x04, R13
+	MOVQ    (R10), R14
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (R14)(R15*1), R15
-	MOVQ R13, CX
-	SHLQ CL, R15
-	MOVQ R14, 24(R11)
-	ORQ  R15, R12
+	MOVL (R13)(R14*1), R14
+	MOVQ R12, CX
+	SHLQ CL, R14
+	MOVQ R13, 24(R10)
+	ORQ  R14, R11
 
-	// exhausted = exhausted || (br0.off < 4)
-	CMPQ  R14, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br0.off < 4)
+	CMPQ R13, $0x04
+	ADCB $+0, DL
 
 skip_fill0:
 	// val0 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v1 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v1.entry)
 	MOVB   CH, AH
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// val2 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v2 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v2.entry)
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val3 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v3 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v3.entry)
 	MOVB   CH, AL
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// these four writes get coalesced
@@ -366,88 +354,86 @@
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
 	// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
 	// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
-	MOVL AX, (R8)
+	MOVL AX, (BX)
 
 	// update the bitreader structure
-	MOVQ R12, 32(R11)
-	MOVB R13, 40(R11)
-	ADDQ R9, R8
+	MOVQ R11, 32(R10)
+	MOVB R12, 40(R10)
 
 	// br1.fillFast32()
-	MOVQ    80(R11), R12
-	MOVBQZX 88(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    80(R10), R11
+	MOVBQZX 88(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill1
-	MOVQ    72(R11), R14
-	SUBQ    $0x20, R13
-	SUBQ    $0x04, R14
-	MOVQ    48(R11), R15
+	MOVQ    72(R10), R13
+	SUBQ    $0x20, R12
+	SUBQ    $0x04, R13
+	MOVQ    48(R10), R14
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (R14)(R15*1), R15
-	MOVQ R13, CX
-	SHLQ CL, R15
-	MOVQ R14, 72(R11)
-	ORQ  R15, R12
+	MOVL (R13)(R14*1), R14
+	MOVQ R12, CX
+	SHLQ CL, R14
+	MOVQ R13, 72(R10)
+	ORQ  R14, R11
 
-	// exhausted = exhausted || (br1.off < 4)
-	CMPQ  R14, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br1.off < 4)
+	CMPQ R13, $0x04
+	ADCB $+0, DL
 
 skip_fill1:
 	// val0 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v1 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v1.entry)
 	MOVB   CH, AH
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// val2 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v2 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v2.entry)
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val3 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v3 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v3.entry)
 	MOVB   CH, AL
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// these four writes get coalesced
@@ -455,88 +441,86 @@
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
 	// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
 	// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
-	MOVL AX, (R8)
+	MOVL AX, (BX)(R8*1)
 
 	// update the bitreader structure
-	MOVQ R12, 80(R11)
-	MOVB R13, 88(R11)
-	ADDQ R9, R8
+	MOVQ R11, 80(R10)
+	MOVB R12, 88(R10)
 
 	// br2.fillFast32()
-	MOVQ    128(R11), R12
-	MOVBQZX 136(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    128(R10), R11
+	MOVBQZX 136(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill2
-	MOVQ    120(R11), R14
-	SUBQ    $0x20, R13
-	SUBQ    $0x04, R14
-	MOVQ    96(R11), R15
+	MOVQ    120(R10), R13
+	SUBQ    $0x20, R12
+	SUBQ    $0x04, R13
+	MOVQ    96(R10), R14
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (R14)(R15*1), R15
-	MOVQ R13, CX
-	SHLQ CL, R15
-	MOVQ R14, 120(R11)
-	ORQ  R15, R12
+	MOVL (R13)(R14*1), R14
+	MOVQ R12, CX
+	SHLQ CL, R14
+	MOVQ R13, 120(R10)
+	ORQ  R14, R11
 
-	// exhausted = exhausted || (br2.off < 4)
-	CMPQ  R14, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br2.off < 4)
+	CMPQ R13, $0x04
+	ADCB $+0, DL
 
 skip_fill2:
 	// val0 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v1 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v1.entry)
 	MOVB   CH, AH
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// val2 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v2 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v2.entry)
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val3 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v3 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v3.entry)
 	MOVB   CH, AL
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// these four writes get coalesced
@@ -544,88 +528,86 @@
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
 	// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
 	// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
-	MOVL AX, (R8)
+	MOVL AX, (BX)(R8*2)
 
 	// update the bitreader structure
-	MOVQ R12, 128(R11)
-	MOVB R13, 136(R11)
-	ADDQ R9, R8
+	MOVQ R11, 128(R10)
+	MOVB R12, 136(R10)
 
 	// br3.fillFast32()
-	MOVQ    176(R11), R12
-	MOVBQZX 184(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    176(R10), R11
+	MOVBQZX 184(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill3
-	MOVQ    168(R11), R14
-	SUBQ    $0x20, R13
-	SUBQ    $0x04, R14
-	MOVQ    144(R11), R15
+	MOVQ    168(R10), R13
+	SUBQ    $0x20, R12
+	SUBQ    $0x04, R13
+	MOVQ    144(R10), R14
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (R14)(R15*1), R15
-	MOVQ R13, CX
-	SHLQ CL, R15
-	MOVQ R14, 168(R11)
-	ORQ  R15, R12
+	MOVL (R13)(R14*1), R14
+	MOVQ R12, CX
+	SHLQ CL, R14
+	MOVQ R13, 168(R10)
+	ORQ  R14, R11
 
-	// exhausted = exhausted || (br3.off < 4)
-	CMPQ  R14, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br3.off < 4)
+	CMPQ R13, $0x04
+	ADCB $+0, DL
 
 skip_fill3:
 	// val0 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v1 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v1.entry)
 	MOVB   CH, AH
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// val2 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v2 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v2.entry)
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val3 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v3 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v3.entry)
 	MOVB   CH, AL
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// these four writes get coalesced
@@ -633,11 +615,12 @@
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
 	// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
 	// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
-	MOVL AX, (R8)
+	LEAQ (R8)(R8*2), CX
+	MOVL AX, (BX)(CX*1)
 
 	// update the bitreader structure
-	MOVQ  R12, 176(R11)
-	MOVB  R13, 184(R11)
+	MOVQ  R11, 176(R10)
+	MOVB  R12, 184(R10)
 	ADDQ  $0x04, BX
 	TESTB DL, DL
 	JZ    main_loop
@@ -653,7 +636,7 @@
 	MOVQ    16(CX), DX
 	MOVQ    24(CX), BX
 	CMPQ    BX, $0x04
-	JB      error_max_decoded_size_exeeded
+	JB      error_max_decoded_size_exceeded
 	LEAQ    (DX)(BX*1), BX
 	MOVQ    (CX), SI
 	MOVQ    (SI), R8
@@ -668,7 +651,7 @@
 	// Check if we have room for 4 bytes in the output buffer
 	LEAQ 4(DX), CX
 	CMPQ CX, BX
-	JGE  error_max_decoded_size_exeeded
+	JGE  error_max_decoded_size_exceeded
 
 	// Decode 4 values
 	CMPQ R11, $0x20
@@ -745,7 +728,7 @@
 	RET
 
 	// Report error
-error_max_decoded_size_exeeded:
+error_max_decoded_size_exceeded:
 	MOVQ ctx+0(FP), AX
 	MOVQ $-1, CX
 	MOVQ CX, 40(AX)
@@ -758,7 +741,7 @@
 	MOVQ    16(CX), DX
 	MOVQ    24(CX), BX
 	CMPQ    BX, $0x04
-	JB      error_max_decoded_size_exeeded
+	JB      error_max_decoded_size_exceeded
 	LEAQ    (DX)(BX*1), BX
 	MOVQ    (CX), SI
 	MOVQ    (SI), R8
@@ -773,7 +756,7 @@
 	// Check if we have room for 4 bytes in the output buffer
 	LEAQ 4(DX), CX
 	CMPQ CX, BX
-	JGE  error_max_decoded_size_exeeded
+	JGE  error_max_decoded_size_exceeded
 
 	// Decode 4 values
 	CMPQ  R11, $0x20
@@ -840,7 +823,7 @@
 	RET
 
 	// Report error
-error_max_decoded_size_exeeded:
+error_max_decoded_size_exceeded:
 	MOVQ ctx+0(FP), AX
 	MOVQ $-1, CX
 	MOVQ CX, 40(AX)
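In the regenerated assembly the SETLT/ORB pair becomes CMPQ followed by ADCB $0: the unsigned compare leaves the carry flag set exactly when the offset is below 4, and the add-with-carry folds that bit into DL without a SETcc. Semantically it is just an accumulated "any reader exhausted" flag; a rough Go equivalent (illustrative only, the real routine is generated code):

package main

import "fmt"

func main() {
    offs := []int{10, 7, 3, 12} // remaining bytes in the four bit readers
    exhausted := 0
    for _, off := range offs {
        // Mirrors "exhausted += (br.off < 4)": the carry out of the
        // unsigned compare is added into the flag byte.
        if off < 4 {
            exhausted++
        }
    }
    fmt.Println(exhausted != 0) // true: one stream is nearly empty
}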
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
index 4f6f37c..908c17d 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -122,17 +122,21 @@
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			//copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index e8ad17a..77ecd68 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -88,7 +88,7 @@
 	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
 	MaxDecodedSize int
 
-	br byteReader
+	srcLen int
 
 	// MaxSymbolValue will override the maximum symbol value of the next block.
 	MaxSymbolValue uint8
@@ -170,7 +170,7 @@
 	if s.fse == nil {
 		s.fse = &fse.Scratch{}
 	}
-	s.br.init(in)
+	s.srcLen = len(in)
 
 	return s, nil
 }
diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go
new file mode 100644
index 0000000..e54909e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/le.go
@@ -0,0 +1,5 @@
+package le
+
+type Indexer interface {
+	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
new file mode 100644
index 0000000..0cfb5c0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
@@ -0,0 +1,42 @@
+//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
+
+package le
+
+import (
+	"encoding/binary"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+	return b[i]
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+	return binary.LittleEndian.Uint16(b[i:])
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+	return binary.LittleEndian.Uint32(b[i:])
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+	return binary.LittleEndian.Uint64(b[i:])
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+	binary.LittleEndian.PutUint16(b, v)
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+	binary.LittleEndian.PutUint32(b, v)
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+	binary.LittleEndian.PutUint64(b, v)
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
new file mode 100644
index 0000000..ada45cd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
@@ -0,0 +1,55 @@
+// We enable 64 bit LE platforms:
+
+//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
+
+package le
+
+import (
+	"unsafe"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+	//return b[i]
+	//return *(*byte)(unsafe.Pointer(&b[i]))
+	return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+	//return binary.LittleEndian.Uint16(b[i:])
+	//return *(*uint16)(unsafe.Pointer(&b[i]))
+	return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+	//return binary.LittleEndian.Uint32(b[i:])
+	//return *(*uint32)(unsafe.Pointer(&b[i]))
+	return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+	//return binary.LittleEndian.Uint64(b[i:])
+	//return *(*uint64)(unsafe.Pointer(&b[i]))
+	return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+	//binary.LittleEndian.PutUint16(b, v)
+	*(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+	//binary.LittleEndian.PutUint32(b, v)
+	*(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+	//binary.LittleEndian.PutUint64(b, v)
+	*(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
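The new internal/le package gives the module one little-endian load/store API: the unsafe fast path above on 64-bit little-endian platforms, and the encoding/binary fallback elsewhere. Because the package is internal it is only importable from inside github.com/klauspost/compress, so the sketch below is illustrative only:

package zstd // hypothetical caller inside the module

import (
    "fmt"

    "github.com/klauspost/compress/internal/le"
)

func leExample() {
    b := make([]byte, 8)
    le.Store32(b, 0x01020304)
    // The Indexer constraint accepts any integer index type.
    fmt.Printf("%#x %#x\n", le.Load32(b, 0), le.Load16(b, uint8(1))) // 0x1020304 0x203
}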
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 511bba6..2754bac 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -18,6 +18,7 @@
 // emitLiteral writes a literal chunk and returns the number of bytes written.
 //
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	1 <= len(lit) && len(lit) <= 65536
 func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@
 // emitCopy writes a copy chunk and returns the number of bytes written.
 //
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	1 <= offset && offset <= 65535
 //	4 <= length && length <= 65535
@@ -49,7 +51,7 @@
 	i := 0
 	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
 	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
-	// length emitted down below is is a little lower (at 60 = 64 - 4), because
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
 	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
 	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
 	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
@@ -85,28 +87,40 @@
 	return i + 2
 }
 
-// extendMatch returns the largest k such that k <= len(src) and that
-// src[i:i+k-j] and src[j:k] have the same contents.
-//
-// It assumes that:
-//	0 <= i && i < j && j <= len(src)
-func extendMatch(src []byte, i, j int) int {
-	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
-	}
-	return j
-}
-
 func hash(u, shift uint32) uint32 {
 	return (u * 0x1e35a7bd) >> shift
 }
 
+// EncodeBlockInto exposes encodeBlock but checks dst size.
+func EncodeBlockInto(dst, src []byte) (d int) {
+	if MaxEncodedLen(len(src)) > len(dst) {
+		return 0
+	}
+
+	// encodeBlock breaks on too big blocks, so split.
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return d
+}
+
 // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
 // assumes that the varint-encoded length of the decompressed bytes has already
 // been written.
 //
 // It also assumes that:
+//
 //	len(dst) >= MaxEncodedLen(len(src)) &&
-// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlock(dst, src []byte) (d int) {
 	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
 	// The table element type is uint16, as s < sLimit and sLimit < len(src)
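EncodeBlockInto returns 0 when dst cannot hold MaxEncodedLen(len(src)) and otherwise splits the input into maxBlockSize chunks before handing each to encodeBlock or emitLiteral. snapref is also an internal package, so a caller would live inside it; a hedged sketch of the calling convention, assuming len(src) stays within the format's limits:

package snapref

// encodeWithCheck is a hypothetical helper showing how the new exported
// function is meant to be called: size dst for the worst case and treat a
// zero return as "nothing was written".
func encodeWithCheck(src []byte) []byte {
    dst := make([]byte, MaxEncodedLen(len(src)))
    return dst[:EncodeBlockInto(dst, src)]
}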
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
index 2263853..81bda5e 100644
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,3 @@
 module github.com/klauspost/compress
 
-go 1.16
-
+go 1.22
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index beb7fa8..c11d7fa 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -6,12 +6,14 @@
 
 This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. 
 
-This package is pure Go and without use of "unsafe". 
+This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.
 
 The `zstd` package is provided as open source software using a Go standard license.
 
 Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
 
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
 ## Installation
 
 Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
@@ -257,7 +259,7 @@
 
 ## Decompressor
 
-Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
 
 This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
 kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
@@ -302,7 +304,7 @@
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
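The corrected line now qualifies the option with the package name. The README snippet describes stateless, concurrent use of one cached decoder; a hedged, self-contained version of that pattern:

package main

import (
    "fmt"

    "github.com/klauspost/compress/zstd"
)

// One decoder, created without a stream, can be shared by any number of
// concurrent DecodeAll calls.
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

func decompress(src []byte) ([]byte, error) {
    return decoder.DecodeAll(src, nil)
}

func main() {
    // Decoding invalid data returns an error rather than panicking.
    _, err := decompress([]byte("not zstd"))
    fmt.Println(err != nil) // true
}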
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 97299d4..d41e3e1 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -5,11 +5,12 @@
 package zstd
 
 import (
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
 	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // bitReader reads a bitstream in reverse.
@@ -17,8 +18,8 @@
 // for aligning the input.
 type bitReader struct {
 	in       []byte
-	off      uint   // next byte to read is at in[off - 1]
 	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	cursor   int    // offset where next read should end
 	bitsRead uint8
 }
 
@@ -28,12 +29,12 @@
 		return errors.New("corrupt stream: too short")
 	}
 	b.in = in
-	b.off = uint(len(in))
 	// The highest bit of the last byte indicates where to start
 	v := in[len(in)-1]
 	if v == 0 {
 		return errors.New("corrupt stream, did not find end of stream")
 	}
+	b.cursor = len(in)
 	b.bitsRead = 64
 	b.value = 0
 	if len(in) >= 8 {
@@ -69,21 +70,16 @@
 	if b.bitsRead < 32 {
 		return
 	}
-	// 2 bounds checks.
-	v := b.in[b.off-4:]
-	v = v[:4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	b.value = (b.value << 32) | uint64(low)
+	b.cursor -= 4
+	b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
 	b.bitsRead -= 32
-	b.off -= 4
 }
 
 // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
 func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.cursor -= 8
+	b.value = le.Load64(b.in, b.cursor)
 	b.bitsRead = 0
-	b.off -= 8
 }
 
 // fill() will make sure at least 32 bits are available.
@@ -91,25 +87,23 @@
 	if b.bitsRead < 32 {
 		return
 	}
-	if b.off >= 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-		b.value = (b.value << 32) | uint64(low)
+	if b.cursor >= 4 {
+		b.cursor -= 4
+		b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
 		b.bitsRead -= 32
-		b.off -= 4
 		return
 	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
+
+	b.bitsRead -= uint8(8 * b.cursor)
+	for b.cursor > 0 {
+		b.cursor -= 1
+		b.value = (b.value << 8) | uint64(b.in[b.cursor])
 	}
 }
 
 // finished returns true if all bits have been read from the bit stream.
 func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
+	return b.cursor == 0 && b.bitsRead >= 64
 }
 
 // overread returns true if more bits have been requested than is on the stream.
@@ -119,13 +113,14 @@
 
 // remain returns the number of bits remaining.
 func (b *bitReader) remain() uint {
-	return b.off*8 + 64 - uint(b.bitsRead)
+	return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
 }
 
 // close the bitstream and returns an error if out-of-buffer reads occurred.
 func (b *bitReader) close() error {
 	// Release reference.
 	b.in = nil
+	b.cursor = 0
 	if !b.finished() {
 		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
 	}
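The reader now tracks a cursor (the offset where the next read ends) instead of `off`, and pulls whole 32- or 64-bit little-endian words from the tail of the input, since zstd bitstreams are read back to front. A standalone sketch of the refill step, using encoding/binary in place of the internal le helpers:

package main

import (
    "encoding/binary"
    "fmt"
)

// refill mirrors bitReader.fillFast after the patch: once 32+ bits have been
// consumed, step the cursor back four bytes and shift the next little-endian
// word into the low half of the accumulator.
func refill(in []byte, cursor *int, value *uint64, bitsRead *uint8) {
    if *bitsRead < 32 {
        return
    }
    *cursor -= 4
    *value = (*value << 32) | uint64(binary.LittleEndian.Uint32(in[*cursor:]))
    *bitsRead -= 32
}

func main() {
    in := []byte{1, 2, 3, 4, 5, 6, 7, 8}
    cursor, value, bitsRead := len(in), uint64(0), uint8(64)
    refill(in, &cursor, &value, &bitsRead)
    fmt.Printf("cursor=%d value=%#x bits=%d\n", cursor, value, bitsRead)
    // cursor=4 value=0x8070605 bits=32
}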
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index 78b3c61..1952f17 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -97,12 +97,11 @@
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 7eed729..0dd742f 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,14 +5,10 @@
 package zstd
 
 import (
-	"bytes"
-	"encoding/binary"
 	"errors"
 	"fmt"
+	"hash/crc32"
 	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
 	"sync"
 
 	"github.com/klauspost/compress/huff0"
@@ -83,8 +79,9 @@
 
 	err error
 
-	// Check against this crc
-	checkCRC []byte
+	// Check against this crc, if hasCRC is true.
+	checkCRC uint32
+	hasCRC   bool
 
 	// Frame to use for singlethreaded decoding.
 	// Should not be used by the decoder itself since parent may be another frame.
@@ -192,16 +189,14 @@
 	}
 
 	// Read block data.
-	if cap(b.dataStorage) < cSize {
+	if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
+		// byteBuf doesn't need a destination buffer.
 		if b.lowMem || cSize > maxCompressedBlockSize {
 			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
 		} else {
 			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
 		}
 	}
-	if cap(b.dst) <= maxSize {
-		b.dst = make([]byte, 0, maxSize+1)
-	}
 	b.data, err = br.readBig(cSize, b.dataStorage)
 	if err != nil {
 		if debugDecoder {
@@ -210,6 +205,9 @@
 		}
 		return err
 	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
 	return nil
 }
 
@@ -233,7 +231,7 @@
 			if b.lowMem {
 				b.dst = make([]byte, b.RLESize)
 			} else {
-				b.dst = make([]byte, maxBlockSize)
+				b.dst = make([]byte, maxCompressedBlockSize)
 			}
 		}
 		b.dst = b.dst[:b.RLESize]
@@ -441,6 +439,9 @@
 			}
 		}
 		var err error
+		if debugDecoder {
+			println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
+		}
 		huff, literals, err = huff0.ReadTable(literals, huff)
 		if err != nil {
 			println("reading huffman table:", err)
@@ -549,6 +550,9 @@
 		if debugDecoder {
 			printf("Compression modes: 0b%b", compMode)
 		}
+		if compMode&3 != 0 {
+			return errors.New("corrupt block: reserved bits not zero")
+		}
 		for i := uint(0); i < 3; i++ {
 			mode := seqCompMode((compMode >> (6 - i*2)) & 3)
 			if debugDecoder {
@@ -587,10 +591,12 @@
 				}
 				seq.fse.setRLE(symb)
 				if debugDecoder {
-					printf("RLE set to %+v, code: %v", symb, v)
+					printf("RLE set to 0x%x, code: %v", symb, v)
 				}
 			case compModeFSE:
-				println("Reading table for", tableIndex(i))
+				if debugDecoder {
+					println("Reading table for", tableIndex(i))
+				}
 				if seq.fse == nil || seq.fse.preDefined {
 					seq.fse = fseDecoderPool.Get().(*fseDecoder)
 				}
@@ -638,21 +644,6 @@
 		println("initializing sequences:", err)
 		return err
 	}
-	// Extract blocks...
-	if false && hist.dict == nil {
-		fatalErr := func(err error) {
-			if err != nil {
-				panic(err)
-			}
-		}
-		fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
-		var buf bytes.Buffer
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
-		buf.Write(in)
-		ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
-	}
 
 	return nil
 }
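checkCRC changes from a byte slice to a uint32 plus a hasCRC flag because a zstd content checksum is simply the low 32 bits of the XXH64 of the decoded frame; the decoder.go hunk further down compares it exactly that way. A standalone sketch, using the cespare xxhash package purely for illustration:

package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

// frameChecksum computes what a zstd frame stores as its optional content
// checksum: the least significant 32 bits of XXH64 over the decoded bytes.
func frameChecksum(decoded []byte) uint32 {
    return uint32(xxhash.Sum64(decoded))
}

func main() {
    fmt.Printf("%08x\n", frameChecksum([]byte("hello")))
}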
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 12e8f6f..fd35ea1 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -9,6 +9,7 @@
 	"fmt"
 	"math"
 	"math/bits"
+	"slices"
 
 	"github.com/klauspost/compress/huff0"
 )
@@ -361,14 +362,21 @@
 	if len(lits) >= 1024 {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
-	} else if len(lits) > 32 {
+	} else if len(lits) > 16 {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
 	} else {
 		err = huff0.ErrIncompressible
 	}
-
+	if err == nil && len(out)+5 > len(lits) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSizes(len(out), len(lits), single)
+		if len(out)+lh.size() >= len(lits) {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		if debugEncoder {
@@ -420,6 +428,16 @@
 	return nil
 }
 
+// encodeRLE will encode an RLE block.
+func (b *blockEnc) encodeRLE(val byte, length uint32) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(length)
+	bh.setType(blockTypeRLE)
+	b.output = bh.appendTo(b.output)
+	b.output = append(b.output, val)
+}
+
 // fuzzFseEncoder can be used to fuzz the FSE encoder.
 func fuzzFseEncoder(data []byte) int {
 	if len(data) > maxSequences || len(data) < 2 {
@@ -440,16 +458,7 @@
 		// All 0
 		return 0
 	}
-	maxCount := func(a []uint32) int {
-		var max uint32
-		for _, v := range a {
-			if v > max {
-				max = v
-			}
-		}
-		return int(max)
-	}
-	cnt := maxCount(hist[:maxSym])
+	cnt := int(slices.Max(hist[:maxSym]))
 	if cnt == len(data) {
 		// RLE
 		return 0
@@ -472,8 +481,18 @@
 	if len(b.sequences) == 0 {
 		return b.encodeLits(b.literals, rawAllLits)
 	}
+	if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
+		// Check common RLE cases.
+		seq := b.sequences[0]
+		if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
+			// Offset == 1 and 0 or 1 literals.
+			b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
+			return nil
+		}
+	}
+
 	// We want some difference to at least account for the headers.
-	saved := b.size - len(b.literals) - (b.size >> 5)
+	saved := b.size - len(b.literals) - (b.size >> 6)
 	if saved < 16 {
 		if org == nil {
 			return errIncompressible
@@ -503,7 +522,7 @@
 	if len(b.literals) >= 1024 && !raw {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-	} else if len(b.literals) > 32 && !raw {
+	} else if len(b.literals) > 16 && !raw {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
@@ -511,6 +530,17 @@
 		err = huff0.ErrIncompressible
 	}
 
+	if err == nil && len(out)+5 > len(b.literals) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSize(len(b.literals))
+		szRaw := lh.size()
+		lh.setSizes(len(out), len(b.literals), single)
+		szComp := lh.size()
+		if len(out)+szComp >= len(b.literals)+szRaw {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		lh.setType(literalsBlockRaw)
@@ -773,16 +803,16 @@
 	ml.flush(mlEnc.actualTableLog)
 	of.flush(ofEnc.actualTableLog)
 	ll.flush(llEnc.actualTableLog)
-	err = wr.close()
-	if err != nil {
-		return err
-	}
+	wr.close()
 	b.output = wr.out
 
+	// Maybe even add a bigger margin.
 	if len(b.output)-3-bhOffset >= b.size {
-		// Maybe even add a bigger margin.
+		// Discard and encode as raw block.
+		b.output = b.encodeRawTo(b.output[:bhOffset], org)
+		b.popOffsets()
 		b.litEnc.Reuse = huff0.ReusePolicyNone
-		return errIncompressible
+		return nil
 	}
 
 	// Size is output minus block header.
@@ -846,15 +876,6 @@
 			}
 		}
 	}
-	maxCount := func(a []uint32) int {
-		var max uint32
-		for _, v := range a {
-			if v > max {
-				max = v
-			}
-		}
-		return int(max)
-	}
 	if debugAsserts && mlMax > maxMatchLengthSymbol {
 		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
 	}
@@ -865,7 +886,7 @@
 		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
 	}
 
-	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
-	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
-	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+	b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
+	b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
+	b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
 }
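The new encodeRLE path makes the cost of a run constant: a 3-byte block header plus the single repeated byte, regardless of run length. A sketch of that header layout, matching how decodeheader.go (further down in this patch) reads it back — bit 0 = last block, bits 1-2 = block type (RLE is 1), the remaining bits = size, which for RLE is the decoded run length:

package main

import "fmt"

// rleBlockHeader builds the 3-byte little-endian zstd block header for an
// RLE block.
func rleBlockHeader(last bool, length uint32) [3]byte {
    bh := length<<3 | 1<<1
    if last {
        bh |= 1
    }
    return [3]byte{byte(bh), byte(bh >> 8), byte(bh >> 16)}
}

func main() {
    hdr := rleBlockHeader(true, 1000)
    // A 1000-byte run costs 4 bytes: header + the repeated value.
    fmt.Println(append(hdr[:], 'x')) // [67 31 0 120]
}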
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 2ad0207..55a3885 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 )
 
 type byteBuffer interface {
@@ -55,7 +54,7 @@
 func (b *byteBuf) readByte() (byte, error) {
 	bb := *b
 	if len(bb) < 1 {
-		return 0, nil
+		return 0, io.ErrUnexpectedEOF
 	}
 	r := bb[0]
 	*b = bb[1:]
@@ -110,7 +109,7 @@
 }
 
 func (r *readerWrapper) readByte() (byte, error) {
-	n2, err := r.r.Read(r.tmp[:1])
+	n2, err := io.ReadFull(r.r, r.tmp[:1])
 	if err != nil {
 		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
@@ -124,7 +123,7 @@
 }
 
 func (r *readerWrapper) skipN(n int64) error {
-	n2, err := io.CopyN(ioutil.Discard, r.r, n)
+	n2, err := io.CopyN(io.Discard, r.r, n)
 	if n2 != n {
 		err = io.ErrUnexpectedEOF
 	}
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index 5022e71..6a5a298 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -4,7 +4,6 @@
 package zstd
 
 import (
-	"bytes"
 	"encoding/binary"
 	"errors"
 	"io"
@@ -96,42 +95,54 @@
 // If there isn't enough input, io.ErrUnexpectedEOF is returned.
 // The FirstBlock.OK will indicate if enough information was available to decode the first block header.
 func (h *Header) Decode(in []byte) error {
+	_, err := h.DecodeAndStrip(in)
+	return err
+}
+
+// DecodeAndStrip will decode the header from the beginning of the stream
+// and on success return the remaining bytes.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) {
 	*h = Header{}
 	if len(in) < 4 {
-		return io.ErrUnexpectedEOF
+		return nil, io.ErrUnexpectedEOF
 	}
 	h.HeaderSize += 4
 	b, in := in[:4], in[4:]
-	if !bytes.Equal(b, frameMagic) {
-		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
-			return ErrMagicMismatch
+	if string(b) != frameMagic {
+		if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
+			return nil, ErrMagicMismatch
 		}
 		if len(in) < 4 {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		h.HeaderSize += 4
 		h.Skippable = true
 		h.SkippableID = int(b[0] & 0xf)
 		h.SkippableSize = binary.LittleEndian.Uint32(in)
-		return nil
+		return in[4:], nil
 	}
 
 	// Read Window_Descriptor
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
 	if len(in) < 1 {
-		return io.ErrUnexpectedEOF
+		return nil, io.ErrUnexpectedEOF
 	}
 	fhd, in := in[0], in[1:]
 	h.HeaderSize++
 	h.SingleSegment = fhd&(1<<5) != 0
 	h.HasCheckSum = fhd&(1<<2) != 0
 	if fhd&(1<<3) != 0 {
-		return errors.New("reserved bit set on frame header")
+		return nil, errors.New("reserved bit set on frame header")
 	}
 
 	if !h.SingleSegment {
 		if len(in) < 1 {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		var wd byte
 		wd, in = in[0], in[1:]
@@ -149,11 +160,11 @@
 			size = 4
 		}
 		if len(in) < int(size) {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		b, in = in[:size], in[size:]
 		h.HeaderSize += int(size)
-		switch size {
+		switch len(b) {
 		case 1:
 			h.DictionaryID = uint32(b[0])
 		case 2:
@@ -179,11 +190,11 @@
 	if fcsSize > 0 {
 		h.HasFCS = true
 		if len(in) < fcsSize {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		b, in = in[:fcsSize], in[fcsSize:]
 		h.HeaderSize += int(fcsSize)
-		switch fcsSize {
+		switch len(b) {
 		case 1:
 			h.FrameContentSize = uint64(b[0])
 		case 2:
@@ -200,7 +211,7 @@
 
 	// Frame Header done, we will not fail from now on.
 	if len(in) < 3 {
-		return nil
+		return in, nil
 	}
 	tmp := in[:3]
 	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
@@ -210,7 +221,7 @@
 	cSize := int(bh >> 3)
 	switch blockType {
 	case blockTypeReserved:
-		return nil
+		return in, nil
 	case blockTypeRLE:
 		h.FirstBlock.Compressed = true
 		h.FirstBlock.DecompressedSize = cSize
@@ -226,5 +237,25 @@
 	}
 
 	h.FirstBlock.OK = true
-	return nil
+	return in, nil
+}
+
+// AppendTo will append the encoded header to the dst slice.
+// There is no error checking performed on the header values.
+func (h *Header) AppendTo(dst []byte) ([]byte, error) {
+	if h.Skippable {
+		magic := [4]byte{0x50, 0x2a, 0x4d, 0x18}
+		magic[0] |= byte(h.SkippableID & 0xf)
+		dst = append(dst, magic[:]...)
+		f := h.SkippableSize
+		return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil
+	}
+	f := frameHeader{
+		ContentSize:   h.FrameContentSize,
+		WindowSize:    uint32(h.WindowSize),
+		SingleSegment: h.SingleSegment,
+		Checksum:      h.HasCheckSum,
+		DictID:        h.DictionaryID,
+	}
+	return f.appendTo(dst), nil
 }
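DecodeAndStrip behaves like Decode but additionally returns whatever follows the parsed header, and AppendTo re-serializes a Header. A hedged usage sketch of the new method (field names as shown in the hunk; at least HeaderMaxSize bytes should be supplied for a complete parse):

package main

import (
    "fmt"

    "github.com/klauspost/compress/zstd"
)

// peekFrame parses the frame header of a zstd stream and returns the bytes
// remaining after it.
func peekFrame(stream []byte) ([]byte, error) {
    var h zstd.Header
    remain, err := h.DecodeAndStrip(stream)
    if err != nil {
        return nil, err
    }
    fmt.Println("window:", h.WindowSize, "checksum:", h.HasCheckSum)
    return remain, nil
}

func main() {
    if _, err := peekFrame(nil); err != nil {
        fmt.Println("short input:", err) // io.ErrUnexpectedEOF for empty input
    }
}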
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index d212f47..ea2a193 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,7 +5,6 @@
 package zstd
 
 import (
-	"bytes"
 	"context"
 	"encoding/binary"
 	"io"
@@ -35,13 +34,13 @@
 		br           readerWrapper
 		enabled      bool
 		inFrame      bool
+		dstBuf       []byte
 	}
 
 	frame *frameDec
 
 	// Custom dictionaries.
-	// Always uses copies.
-	dicts map[uint32]dict
+	dicts map[uint32]*dict
 
 	// streamWg is the waitgroup for all streams
 	streamWg sync.WaitGroup
@@ -83,7 +82,7 @@
 // can run multiple concurrent stateless decodes. It is even possible to
 // use stateless decodes while a stream is being decoded.
 //
-// The Reset function can be used to initiate a new stream, which is will considerably
+// The Reset function can be used to initiate a new stream, which will considerably
 // reduce the allocations normally caused by NewReader.
 func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
 	initPredefined()
@@ -103,7 +102,7 @@
 	}
 
 	// Transfer option dicts.
-	d.dicts = make(map[uint32]dict, len(d.o.dicts))
+	d.dicts = make(map[uint32]*dict, len(d.o.dicts))
 	for _, dc := range d.o.dicts {
 		d.dicts[dc.id] = dc
 	}
@@ -124,7 +123,7 @@
 }
 
 // Read bytes from the decompressed stream into p.
-// Returns the number of bytes written and any error that occurred.
+// Returns the number of bytes read and any error that occurred.
 // When the stream is done, io.EOF will be returned.
 func (d *Decoder) Read(p []byte) (int, error) {
 	var n int
@@ -187,21 +186,23 @@
 	}
 
 	// If bytes buffer and < 5MB, do sync decoding anyway.
-	if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+	if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
 		bb2 := bb
 		if debugDecoder {
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 		}
 		b := bb2.Bytes()
 		var dst []byte
-		if cap(d.current.b) > 0 {
-			dst = d.current.b
+		if cap(d.syncStream.dstBuf) > 0 {
+			dst = d.syncStream.dstBuf[:0]
 		}
 
-		dst, err := d.DecodeAll(b, dst[:0])
+		dst, err := d.DecodeAll(b, dst)
 		if err == nil {
 			err = io.EOF
 		}
+		// Save output buffer
+		d.syncStream.dstBuf = dst
 		d.current.b = dst
 		d.current.err = err
 		d.current.flushed = true
@@ -216,6 +217,7 @@
 	d.current.err = nil
 	d.current.flushed = false
 	d.current.d = nil
+	d.syncStream.dstBuf = nil
 
 	// Ensure no-one else is still running...
 	d.streamWg.Wait()
@@ -312,6 +314,7 @@
 	// Grab a block decoder and frame decoder.
 	block := <-d.decoders
 	frame := block.localFrame
+	initialSize := len(dst)
 	defer func() {
 		if debugDecoder {
 			printf("re-adding decoder: %p", block)
@@ -320,6 +323,7 @@
 		frame.bBuf = nil
 		if frame.history.decoders.br != nil {
 			frame.history.decoders.br.in = nil
+			frame.history.decoders.br.cursor = 0
 		}
 		d.decoders <- block
 	}()
@@ -337,15 +341,8 @@
 			}
 			return dst, err
 		}
-		if frame.DictionaryID != nil {
-			dict, ok := d.dicts[*frame.DictionaryID]
-			if !ok {
-				return nil, ErrUnknownDictionary
-			}
-			if debugDecoder {
-				println("setting dict", frame.DictionaryID)
-			}
-			frame.history.setDict(&dict)
+		if err = d.setDict(frame); err != nil {
+			return nil, err
 		}
 		if frame.WindowSize > d.o.maxWindowSize {
 			if debugDecoder {
@@ -354,7 +351,16 @@
 			return dst, ErrWindowSizeExceeded
 		}
 		if frame.FrameContentSize != fcsUnknown {
-			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+				}
+				return dst, ErrDecoderSizeExceeded
+			}
+			if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+				}
 				return dst, ErrDecoderSizeExceeded
 			}
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -364,7 +370,7 @@
 			}
 		}
 
-		if cap(dst) == 0 {
+		if cap(dst) == 0 && !d.o.limitToCap {
 			// Allocate len(input) * 2 by default if nothing is provided
 			// and we didn't get frame content size.
 			size := len(input) * 2
@@ -382,6 +388,9 @@
 		if err != nil {
 			return dst, err
 		}
+		if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+			return dst, ErrDecoderSizeExceeded
+		}
 		if len(frame.bBuf) == 0 {
 			if debugDecoder {
 				println("frame dbuf empty")
@@ -442,26 +451,23 @@
 		println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
 	}
 
-	if !d.o.ignoreChecksum && len(next.b) > 0 {
-		n, err := d.current.crc.Write(next.b)
-		if err == nil {
-			if n != len(next.b) {
-				d.current.err = io.ErrShortWrite
-			}
-		}
+	if d.o.ignoreChecksum {
+		return true
 	}
-	if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
-		got := d.current.crc.Sum64()
-		var tmp [4]byte
-		binary.LittleEndian.PutUint32(tmp[:], uint32(got))
-		if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
+
+	if len(next.b) > 0 {
+		d.current.crc.Write(next.b)
+	}
+	if next.err == nil && next.d != nil && next.d.hasCRC {
+		got := uint32(d.current.crc.Sum64())
+		if got != next.d.checkCRC {
 			if debugDecoder {
-				println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+				printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
 			}
 			d.current.err = ErrCRCMismatch
 		} else {
 			if debugDecoder {
-				println("CRC ok", tmp[:])
+				printf("CRC ok %08x\n", got)
 			}
 		}
 	}
@@ -477,18 +483,12 @@
 		if !d.syncStream.inFrame {
 			d.frame.history.reset()
 			d.current.err = d.frame.reset(&d.syncStream.br)
+			if d.current.err == nil {
+				d.current.err = d.setDict(d.frame)
+			}
 			if d.current.err != nil {
 				return false
 			}
-			if d.frame.DictionaryID != nil {
-				dict, ok := d.dicts[*d.frame.DictionaryID]
-				if !ok {
-					d.current.err = ErrUnknownDictionary
-					return false
-				} else {
-					d.frame.history.setDict(&dict)
-				}
-			}
 			if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
 				d.current.err = ErrDecoderSizeExceeded
 				return false
@@ -667,6 +667,7 @@
 				if debugDecoder {
 					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
 				}
+				hist.reset()
 				hist.decoders = block.async.newHist.decoders
 				hist.recentOffsets = block.async.newHist.recentOffsets
 				hist.windowSize = block.async.newHist.windowSize
@@ -698,6 +699,7 @@
 			seqExecute <- block
 		}
 		close(seqExecute)
+		hist.reset()
 	}()
 
 	var wg sync.WaitGroup
@@ -721,6 +723,7 @@
 				if debugDecoder {
 					println("Async 2: new history")
 				}
+				hist.reset()
 				hist.windowSize = block.async.newHist.windowSize
 				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
 				if block.async.newHist.dict != nil {
@@ -750,7 +753,7 @@
 					if block.lowMem {
 						block.dst = make([]byte, block.RLESize)
 					} else {
-						block.dst = make([]byte, maxBlockSize)
+						block.dst = make([]byte, maxCompressedBlockSize)
 					}
 				}
 				block.dst = block.dst[:block.RLESize]
@@ -802,13 +805,14 @@
 		if debugDecoder {
 			println("decoder goroutines finished")
 		}
+		hist.reset()
 	}()
 
+	var hist history
 decodeStream:
 	for {
-		var hist history
 		var hasErr bool
-
+		hist.reset()
 		decodeBlock := func(block *blockDec) {
 			if hasErr {
 				if block != nil {
@@ -843,15 +847,14 @@
 		if debugDecoder && err != nil {
 			println("Frame decoder returned", err)
 		}
-		if err == nil && frame.DictionaryID != nil {
-			dict, ok := d.dicts[*frame.DictionaryID]
-			if !ok {
-				err = ErrUnknownDictionary
-			} else {
-				frame.history.setDict(&dict)
-			}
+		if err == nil {
+			err = d.setDict(frame)
 		}
 		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+			if debugDecoder {
+				println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+			}
+
 			err = ErrDecoderSizeExceeded
 		}
 		if err != nil {
@@ -893,18 +896,22 @@
 				println("next block returned error:", err)
 			}
 			dec.err = err
-			dec.checkCRC = nil
+			dec.hasCRC = false
 			if dec.Last && frame.HasCheckSum && err == nil {
 				crc, err := frame.rawInput.readSmall(4)
-				if err != nil {
+				if len(crc) < 4 {
+					if err == nil {
+						err = io.ErrUnexpectedEOF
+
+					}
 					println("CRC missing?", err)
 					dec.err = err
-				}
-				var tmp [4]byte
-				copy(tmp[:], crc)
-				dec.checkCRC = tmp[:]
-				if debugDecoder {
-					println("found crc to check:", dec.checkCRC)
+				} else {
+					dec.checkCRC = binary.LittleEndian.Uint32(crc)
+					dec.hasCRC = true
+					if debugDecoder {
+						printf("found crc to check: %08x\n", dec.checkCRC)
+					}
 				}
 			}
 			err = dec.err
@@ -920,5 +927,23 @@
 	}
 	close(seqDecode)
 	wg.Wait()
+	hist.reset()
 	d.frame.history.b = frameHistCache
 }
+
+func (d *Decoder) setDict(frame *frameDec) (err error) {
+	dict, ok := d.dicts[frame.DictionaryID]
+	if ok {
+		if debugDecoder {
+			println("setting dict", frame.DictionaryID)
+		}
+		frame.history.setDict(dict)
+	} else if frame.DictionaryID != 0 {
+		// A zero or missing dictionary id is ambiguous:
+		// either dictionary zero, or no dictionary. In particular,
+		// zstd --patch-from uses this id for the source file,
+		// so only return an error if the dictionary id is not zero.
+		err = ErrUnknownDictionary
+	}
+	return err
+}
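
With dictionary lookups funneled through the new setDict helper, a frame that carries no dictionary ID (or ID 0) now decodes without any registered dictionary; only a non-zero, unregistered ID fails. A minimal sketch of the consumer side, assuming a trained dictionary file and a frame compressed with it (the file names are placeholders):

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Placeholders: a dictionary produced by "zstd --train" and a frame compressed with it.
	dictBytes, _ := os.ReadFile("my.dict")
	frame, _ := os.ReadFile("payload.zst")

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictBytes))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// Frames referencing the registered ID decode normally; frames with
	// dictionary ID 0 (or none) also decode. Only a non-zero, unregistered
	// ID returns ErrUnknownDictionary.
	out, err := dec.DecodeAll(frame, nil)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("decoded", len(out), "bytes")
}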
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index c70e6fa..774c5f0 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -6,6 +6,8 @@
 
 import (
 	"errors"
+	"fmt"
+	"math/bits"
 	"runtime"
 )
 
@@ -14,20 +16,23 @@
 
 // options retains accumulated state of multiple options.
 type decoderOptions struct {
-	lowMem         bool
-	concurrent     int
-	maxDecodedSize uint64
-	maxWindowSize  uint64
-	dicts          []dict
-	ignoreChecksum bool
+	lowMem          bool
+	concurrent      int
+	maxDecodedSize  uint64
+	maxWindowSize   uint64
+	dicts           []*dict
+	ignoreChecksum  bool
+	limitToCap      bool
+	decodeBufsBelow int
 }
 
 func (o *decoderOptions) setDefault() {
 	*o = decoderOptions{
 		// use less ram: true for now, but may change.
-		lowMem:        true,
-		concurrent:    runtime.GOMAXPROCS(0),
-		maxWindowSize: MaxWindowSize,
+		lowMem:          true,
+		concurrent:      runtime.GOMAXPROCS(0),
+		maxWindowSize:   MaxWindowSize,
+		decodeBufsBelow: 128 << 10,
 	}
 	if o.concurrent > 4 {
 		o.concurrent = 4
@@ -82,7 +87,13 @@
 }
 
 // WithDecoderDicts allows to register one or more dictionaries for the decoder.
-// If several dictionaries with the same ID is provided the last one will be used.
+//
+// Each slice in dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// If several dictionaries with the same ID are provided, the last one will be used.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
 func WithDecoderDicts(dicts ...[]byte) DOption {
 	return func(o *decoderOptions) error {
 		for _, b := range dicts {
@@ -90,12 +101,24 @@
 			if err != nil {
 				return err
 			}
-			o.dicts = append(o.dicts, *d)
+			o.dicts = append(o.dicts, d)
 		}
 		return nil
 	}
 }
 
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+	return func(o *decoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+		}
+		o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+		return nil
+	}
+}
+
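For WithDecoderDictRaw above, the registered content is arbitrary bytes rather than a trained dictionary, which matches the zstd --patch-from use case. A short sketch under the assumption that both sides already share the same raw content (the variables are placeholders):

package main

import "github.com/klauspost/compress/zstd"

func main() {
	shared := []byte("raw content both sides already have") // placeholder
	var compressedFrame []byte                              // placeholder: frame produced against the same content, dict ID 1

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(1, shared))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// Frames that reference raw dictionary ID 1 can now be decoded.
	if _, err := dec.DecodeAll(compressedFrame, nil); err != nil {
		panic(err)
	}
}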
 // WithDecoderMaxWindow allows to set a maximum window size for decodes.
 // This allows rejecting packets that will cause big memory usage.
 // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
@@ -114,6 +137,29 @@
 	}
 }
 
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+	return func(o *decoderOptions) error {
+		o.limitToCap = b
+		return nil
+	}
+}
+
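A minimal sketch of the cap-limited DecodeAll path this option enables: output is confined to the spare capacity of dst, and a frame that would exceed it fails with ErrDecoderSizeExceeded (the compressed input is a placeholder):

package main

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var compressed []byte // placeholder: a zstd frame from an untrusted source

	dec, err := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// Allow at most 1 MiB of output; DecodeAll will not grow dst beyond its capacity.
	dst := make([]byte, 0, 1<<20)
	out, err := dec.DecodeAll(compressed, dst)
	if errors.Is(err, zstd.ErrDecoderSizeExceeded) {
		fmt.Println("frame larger than the 1 MiB budget")
		return
	}
	fmt.Println("decoded", len(out), "bytes")
}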
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses fewer allocations, but the full decompressed object will be held in memory.
+// Note that enabling DecodeAllCapLimit disables this, as does setting a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+	return func(o *decoderOptions) error {
+		o.decodeBufsBelow = size
+		return nil
+	}
+}
+
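A short sketch of tuning this threshold when streaming from an in-memory buffer; raising it keeps the synchronous decode path for larger bytes.Buffer inputs (the compressed bytes are a placeholder):

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var compressed []byte // placeholder: a zstd stream that fits comfortably in memory

	// Fully decode bytes.Buffer-like inputs up to 1 MiB in one synchronous pass.
	dec, err := zstd.NewReader(nil, zstd.WithDecodeBuffersBelow(1<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	if err := dec.Reset(bytes.NewBuffer(compressed)); err != nil {
		panic(err)
	}
	out, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded", len(out), "bytes")
}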
 // IgnoreChecksum allows to forcibly ignore checksum checking.
 func IgnoreChecksum(b bool) DOption {
 	return func(o *decoderOptions) error {
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index a36ae83..b7b8316 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -6,6 +6,8 @@
 	"errors"
 	"fmt"
 	"io"
+	"math"
+	"sort"
 
 	"github.com/klauspost/compress/huff0"
 )
@@ -15,12 +17,14 @@
 
 	litEnc              *huff0.Scratch
 	llDec, ofDec, mlDec sequenceDec
-	//llEnc, ofEnc, mlEnc []*fseEncoder
-	offsets [3]int
-	content []byte
+	offsets             [3]int
+	content             []byte
 }
 
-var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+const dictMagic = "\x37\xa4\x30\xec"
+
+// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
+const dictMaxLength = 1 << 31
 
 // ID returns the dictionary id or 0 if d is nil.
 func (d *dict) ID() uint32 {
@@ -30,14 +34,38 @@
 	return d.id
 }
 
-// DictContentSize returns the dictionary content size or 0 if d is nil.
-func (d *dict) DictContentSize() int {
+// ContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) ContentSize() int {
 	if d == nil {
 		return 0
 	}
 	return len(d.content)
 }
 
+// Content returns the dictionary content.
+func (d *dict) Content() []byte {
+	if d == nil {
+		return nil
+	}
+	return d.content
+}
+
+// Offsets returns the initial offsets.
+func (d *dict) Offsets() [3]int {
+	if d == nil {
+		return [3]int{}
+	}
+	return d.offsets
+}
+
+// LitEncoder returns the literal encoder.
+func (d *dict) LitEncoder() *huff0.Scratch {
+	if d == nil {
+		return nil
+	}
+	return d.litEnc
+}
+
 // Load a dictionary as described in
 // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 func loadDict(b []byte) (*dict, error) {
@@ -50,7 +78,7 @@
 		ofDec: sequenceDec{fse: &fseDecoder{}},
 		mlDec: sequenceDec{fse: &fseDecoder{}},
 	}
-	if !bytes.Equal(b[:4], dictMagic[:]) {
+	if string(b[:4]) != dictMagic {
 		return nil, ErrMagicMismatch
 	}
 	d.id = binary.LittleEndian.Uint32(b[4:8])
@@ -62,7 +90,7 @@
 	var err error
 	d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("loading literal table: %w", err)
 	}
 	d.litEnc.Reuse = huff0.ReusePolicyMust
 
@@ -120,3 +148,418 @@
 
 	return &d, nil
 }
+
+// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
+func InspectDictionary(b []byte) (interface {
+	ID() uint32
+	ContentSize() int
+	Content() []byte
+	Offsets() [3]int
+	LitEncoder() *huff0.Scratch
+}, error) {
+	initPredefined()
+	d, err := loadDict(b)
+	return d, err
+}
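A minimal sketch of using InspectDictionary to report what a dictionary blob contains (the file path is a placeholder):

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	raw, err := os.ReadFile("my.dict") // placeholder: dictionary from "zstd --train"
	if err != nil {
		panic(err)
	}
	info, err := zstd.InspectDictionary(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println("id:", info.ID())
	fmt.Println("content size:", info.ContentSize())
	fmt.Println("initial offsets:", info.Offsets())
	fmt.Println("has literal table:", info.LitEncoder() != nil)
}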
+
+type BuildDictOptions struct {
+	// Dictionary ID.
+	ID uint32
+
+	// Content to use to create dictionary tables.
+	Contents [][]byte
+
+	// History to use for all blocks.
+	History []byte
+
+	// Offsets to use.
+	Offsets [3]int
+
+	// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
+	// See https://github.com/facebook/zstd/issues/3724
+	CompatV155 bool
+
+	// Use the specified encoder level.
+	// The dictionary will be built using the specified encoder level,
+	// which will reflect speed and make the dictionary tailored for that level.
+	// If not set, SpeedBestCompression will be used.
+	Level EncoderLevel
+
+	// DebugOut will write stats and other details here if set.
+	DebugOut io.Writer
+}
+
+func BuildDict(o BuildDictOptions) ([]byte, error) {
+	initPredefined()
+	hist := o.History
+	contents := o.Contents
+	debug := o.DebugOut != nil
+	println := func(args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprintln(o.DebugOut, args...)
+		}
+	}
+	printf := func(s string, args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprintf(o.DebugOut, s, args...)
+		}
+	}
+	print := func(args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprint(o.DebugOut, args...)
+		}
+	}
+
+	if int64(len(hist)) > dictMaxLength {
+		return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
+	}
+	if len(hist) < 8 {
+		return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
+	}
+	if len(contents) == 0 {
+		return nil, errors.New("no content provided")
+	}
+	d := dict{
+		id:      o.ID,
+		litEnc:  nil,
+		llDec:   sequenceDec{},
+		ofDec:   sequenceDec{},
+		mlDec:   sequenceDec{},
+		offsets: o.Offsets,
+		content: hist,
+	}
+	block := blockEnc{lowMem: false}
+	block.init()
+	enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
+	if o.Level != 0 {
+		eOpts := encoderOptions{
+			level:      o.Level,
+			blockSize:  maxMatchLen,
+			windowSize: maxMatchLen,
+			dict:       &d,
+			lowMem:     false,
+		}
+		enc = eOpts.encoder()
+	} else {
+		o.Level = SpeedBestCompression
+	}
+	var (
+		remain [256]int
+		ll     [256]int
+		ml     [256]int
+		of     [256]int
+	)
+	addValues := func(dst *[256]int, src []byte) {
+		for _, v := range src {
+			dst[v]++
+		}
+	}
+	addHist := func(dst *[256]int, src *[256]uint32) {
+		for i, v := range src {
+			dst[i] += int(v)
+		}
+	}
+	seqs := 0
+	nUsed := 0
+	litTotal := 0
+	newOffsets := make(map[uint32]int, 1000)
+	for _, b := range contents {
+		block.reset(nil)
+		if len(b) < 8 {
+			continue
+		}
+		nUsed++
+		enc.Reset(&d, true)
+		enc.Encode(&block, b)
+		addValues(&remain, block.literals)
+		litTotal += len(block.literals)
+		if len(block.sequences) == 0 {
+			continue
+		}
+		seqs += len(block.sequences)
+		block.genCodes()
+		addHist(&ll, block.coders.llEnc.Histogram())
+		addHist(&ml, block.coders.mlEnc.Histogram())
+		addHist(&of, block.coders.ofEnc.Histogram())
+		for i, seq := range block.sequences {
+			if i > 3 {
+				break
+			}
+			offset := seq.offset
+			if offset == 0 {
+				continue
+			}
+			if int(offset) >= len(o.History) {
+				continue
+			}
+			if offset > 3 {
+				newOffsets[offset-3]++
+			} else {
+				newOffsets[uint32(o.Offsets[offset-1])]++
+			}
+		}
+	}
+	// Find most used offsets.
+	var sortedOffsets []uint32
+	for k := range newOffsets {
+		sortedOffsets = append(sortedOffsets, k)
+	}
+	sort.Slice(sortedOffsets, func(i, j int) bool {
+		a, b := sortedOffsets[i], sortedOffsets[j]
+		if a == b {
+			// Prefer the longer offset
+			return sortedOffsets[i] > sortedOffsets[j]
+		}
+		return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
+	})
+	if len(sortedOffsets) > 3 {
+		if debug {
+			print("Offsets:")
+			for i, v := range sortedOffsets {
+				if i > 20 {
+					break
+				}
+				printf("[%d: %d],", v, newOffsets[v])
+			}
+			println("")
+		}
+
+		sortedOffsets = sortedOffsets[:3]
+	}
+	for i, v := range sortedOffsets {
+		o.Offsets[i] = int(v)
+	}
+	if debug {
+		println("New repeat offsets", o.Offsets)
+	}
+
+	if nUsed == 0 || seqs == 0 {
+		return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
+	}
+	if debug {
+		println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
+	}
+	if seqs/nUsed < 512 {
+		// Use 512 as minimum.
+		nUsed = seqs / 512
+		if nUsed == 0 {
+			nUsed = 1
+		}
+	}
+	copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
+		hist := dst.Histogram()
+		var maxSym uint8
+		var maxCount int
+		var fakeLength int
+		for i, v := range src {
+			if v > 0 {
+				v = v / nUsed
+				if v == 0 {
+					v = 1
+				}
+			}
+			if v > maxCount {
+				maxCount = v
+			}
+			if v != 0 {
+				maxSym = uint8(i)
+			}
+			fakeLength += v
+			hist[i] = uint32(v)
+		}
+
+		// Ensure we aren't trying to represent RLE.
+		if maxCount == fakeLength {
+			for i := range hist {
+				if uint8(i) == maxSym {
+					fakeLength++
+					maxSym++
+					hist[i+1] = 1
+					if maxSym > 1 {
+						break
+					}
+				}
+				if hist[0] == 0 {
+					fakeLength++
+					hist[i] = 1
+					if maxSym > 1 {
+						break
+					}
+				}
+			}
+		}
+
+		dst.HistogramFinished(maxSym, maxCount)
+		dst.reUsed = false
+		dst.useRLE = false
+		err := dst.normalizeCount(fakeLength)
+		if err != nil {
+			return nil, err
+		}
+		if debug {
+			println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
+		}
+		return dst.writeCount(nil)
+	}
+	if debug {
+		print("Literal lengths: ")
+	}
+	llTable, err := copyHist(block.coders.llEnc, &ll)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		print("Match lengths: ")
+	}
+	mlTable, err := copyHist(block.coders.mlEnc, &ml)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		print("Offsets: ")
+	}
+	ofTable, err := copyHist(block.coders.ofEnc, &of)
+	if err != nil {
+		return nil, err
+	}
+
+	// Literal table
+	avgSize := litTotal
+	if avgSize > huff0.BlockSizeMax/2 {
+		avgSize = huff0.BlockSizeMax / 2
+	}
+	huffBuff := make([]byte, 0, avgSize)
+	// Target size
+	div := litTotal / avgSize
+	if div < 1 {
+		div = 1
+	}
+	if debug {
+		println("Huffman weights:")
+	}
+	for i, n := range remain[:] {
+		if n > 0 {
+			n = n / div
+			// Allow all entries to be represented.
+			if n == 0 {
+				n = 1
+			}
+			huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+			if debug {
+				printf("[%d: %d], ", i, n)
+			}
+		}
+	}
+	if o.CompatV155 && remain[255]/div == 0 {
+		huffBuff = append(huffBuff, 255)
+	}
+	scratch := &huff0.Scratch{TableLog: 11}
+	for tries := 0; tries < 255; tries++ {
+		scratch = &huff0.Scratch{TableLog: 11}
+		_, _, err = huff0.Compress1X(huffBuff, scratch)
+		if err == nil {
+			break
+		}
+		if debug {
+			printf("Try %d: Huffman error: %v\n", tries+1, err)
+		}
+		huffBuff = huffBuff[:0]
+		if tries == 250 {
+			if debug {
+				println("Huffman: Bailing out with predefined table")
+			}
+
+			// Bail out.... Just generate something
+			huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
+			for i := 0; i < 128; i++ {
+				huffBuff = append(huffBuff, byte(i))
+			}
+			continue
+		}
+		if errors.Is(err, huff0.ErrIncompressible) {
+			// Try truncating least common.
+			for i, n := range remain[:] {
+				if n > 0 {
+					n = n / (div * (i + 1))
+					if n > 0 {
+						huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+					}
+				}
+			}
+			if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
+				huffBuff = append(huffBuff, 255)
+			}
+			if len(huffBuff) == 0 {
+				huffBuff = append(huffBuff, 0, 255)
+			}
+		}
+		if errors.Is(err, huff0.ErrUseRLE) {
+			for i, n := range remain[:] {
+				n = n / (div * (i + 1))
+				// Allow all entries to be represented.
+				if n == 0 {
+					n = 1
+				}
+				huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+			}
+		}
+	}
+
+	var out bytes.Buffer
+	out.Write([]byte(dictMagic))
+	out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
+	out.Write(scratch.OutTable)
+	if debug {
+		println("huff table:", len(scratch.OutTable), "bytes")
+		println("of table:", len(ofTable), "bytes")
+		println("ml table:", len(mlTable), "bytes")
+		println("ll table:", len(llTable), "bytes")
+	}
+	out.Write(ofTable)
+	out.Write(mlTable)
+	out.Write(llTable)
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
+	out.Write(hist)
+	if debug {
+		_, err := loadDict(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		i, err := InspectDictionary(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		println("ID:", i.ID())
+		println("Content size:", i.ContentSize())
+		println("Encoder:", i.LitEncoder() != nil)
+		println("Offsets:", i.Offsets())
+		var totalSize int
+		for _, b := range contents {
+			totalSize += len(b)
+		}
+
+		encWith := func(opts ...EOption) int {
+			enc, err := NewWriter(nil, opts...)
+			if err != nil {
+				panic(err)
+			}
+			defer enc.Close()
+			var dst []byte
+			var totalSize int
+			for _, b := range contents {
+				dst = enc.EncodeAll(b, dst[:0])
+				totalSize += len(dst)
+			}
+			return totalSize
+		}
+		plain := encWith(WithEncoderLevel(o.Level))
+		withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
+		println("Input size:", totalSize)
+		println("Plain Compressed:", plain)
+		println("Dict Compressed:", withDict)
+		println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
+	}
+	return out.Bytes(), nil
+}
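
A sketch of driving BuildDict and then compressing with the result; the sample payloads and history bytes are placeholders, and History must be at least 8 bytes:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Placeholders: representative payloads and the shared history/content bytes.
	samples := [][]byte{
		bytes.Repeat([]byte("GET /api/v1/items HTTP/1.1\r\n"), 4),
		bytes.Repeat([]byte("POST /api/v1/items HTTP/1.1\r\n"), 4),
	}
	history := bytes.Repeat([]byte("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"), 8)

	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1234,
		Contents: samples,
		History:  history,
		Offsets:  [3]int{1, 4, 8},
		Level:    zstd.SpeedBestCompression,
	})
	if err != nil {
		panic(err)
	}

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	fmt.Println("compressed size:", len(enc.EncodeAll(samples[0], nil)))
}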
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 15ae8ee..7d250c6 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -16,6 +16,7 @@
 	cur int32
 	// maximum offset. Should be at least 2x block size.
 	maxMatchOff int32
+	bufferReset int32
 	hist        []byte
 	crc         *xxhash.Digest
 	tmp         [8]byte
@@ -56,8 +57,8 @@
 }
 
 func (e *fastBase) addBlock(src []byte) int32 {
-	if debugAsserts && e.cur > bufferReset {
-		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+	if debugAsserts && e.cur > e.bufferReset {
+		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
 	}
 	// check if we have space already
 	if len(e.hist)+len(src) > cap(e.hist) {
@@ -115,7 +116,7 @@
 			panic(err)
 		}
 		if t < 0 {
-			err := fmt.Sprintf("s (%d) < 0", s)
+			err := fmt.Sprintf("t (%d) < 0", t)
 			panic(err)
 		}
 		if s-t > e.maxMatchOff {
@@ -126,24 +127,7 @@
 			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
 		}
 	}
-	a := src[s:]
-	b := src[t:]
-	b = b[:len(a)]
-	end := int32((len(a) >> 3) << 3)
-	for i := int32(0); i < end; i += 8 {
-		if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
-			return i + int32(bits.TrailingZeros64(diff)>>3)
-		}
-	}
-
-	a = a[end:]
-	b = b[end:]
-	for i := range a {
-		if a[i] != b[i] {
-			return int32(i) + end
-		}
-	}
-	return int32(len(a)) + end
+	return int32(matchLen(src[s:], src[t:]))
 }
 
 // Reset the encoding table.
@@ -160,18 +144,19 @@
 	} else {
 		e.crc.Reset()
 	}
+	e.blk.dictLitEnc = nil
 	if d != nil {
 		low := e.lowMem
 		if singleBlock {
 			e.lowMem = true
 		}
-		e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+		e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
 		e.lowMem = low
 	}
 
 	// We offset current position so everything will be out of reach.
 	// If above reset line, history will be purged.
-	if e.cur < bufferReset {
+	if e.cur < e.bufferReset {
 		e.cur += e.maxMatchOff + int32(len(e.hist))
 	}
 	e.hist = e.hist[:0]
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 96028ec..4613724 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -34,7 +34,7 @@
 	est    int32
 }
 
-const highScore = 25000
+const highScore = maxMatchLen * 8
 
 // estBits will estimate output bits from predefined tables.
 func (m *match) estBits(bitsPerByte int32) {
@@ -43,7 +43,7 @@
 	if m.rep < 0 {
 		ofc = ofCode(uint32(m.s-m.offset) + 3)
 	} else {
-		ofc = ofCode(uint32(m.rep))
+		ofc = ofCode(uint32(m.rep) & 3)
 	}
 	// Cost, excluding
 	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@@ -84,14 +84,10 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = prevEntry{}
-			}
-			for i := range e.longTable[:] {
-				e.longTable[i] = prevEntry{}
-			}
+			e.table = [bestShortTableSize]prevEntry{}
+			e.longTable = [bestLongTableSize]prevEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -139,8 +135,20 @@
 		break
 	}
 
+	// Add block to history
 	s := e.addBlock(src)
 	blk.size = len(src)
+
+	// Check RLE first
+	if len(src) > zstdMinMatch {
+		ml := matchLen(src[1:], src)
+		if ml == len(src)-1 {
+			blk.literals = append(blk.literals, src[0])
+			blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+			return
+		}
+	}
+
 	if len(src) < minNonLiteralBlockSize {
 		blk.extraLits = len(src)
 		blk.literals = blk.literals[:len(src)]
@@ -163,7 +171,6 @@
 
 	// nextEmit is where in src the next emitLiteral should start from.
 	nextEmit := s
-	cv := load6432(src, s)
 
 	// Relative offsets
 	offset1 := int32(blk.recentOffsets[0])
@@ -177,7 +184,6 @@
 		blk.literals = append(blk.literals, src[nextEmit:until]...)
 		s.litLen = uint32(until - nextEmit)
 	}
-	_ = addLiterals
 
 	if debugEncoder {
 		println("recent offsets:", blk.recentOffsets)
@@ -192,54 +198,104 @@
 			panic("offset0 was 0")
 		}
 
-		bestOf := func(a, b match) match {
-			if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
-				return a
-			}
-			return b
-		}
-		const goodEnough = 100
+		const goodEnough = 250
+
+		cv := load6432(src, s)
 
 		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
 		nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
-		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
-			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
-				return match{s: s, est: highScore}
+		// Set m to a match at offset if it looks like that will improve compression.
+		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
+			delta := s - offset
+			if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
+				return
 			}
-			if debugAsserts {
-				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
-					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+			// Try to quick reject if we already have a long match.
+			if m.length > 16 {
+				left := len(src) - int(m.s+m.length)
+				// If we are too close to the end, keep as is.
+				if left <= 0 {
+					return
+				}
+				checkLen := m.length - (s - m.s) - 8
+				if left > 2 && checkLen > 4 {
+					// Check 4 bytes, 4 bytes from the end of the current match.
+					a := load3232(src, offset+checkLen)
+					b := load3232(src, s+checkLen)
+					if a != b {
+						return
+					}
 				}
 			}
-			m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
-			m.estBits(bitsPerByte)
-			return m
+			l := 4 + e.matchlen(s+4, offset+4, src)
+			if m.rep <= 0 {
+				// Extend candidate match backwards as far as possible.
+				// Do not extend repeats as we can assume they are optimal
+				// and offsets change if s == nextEmit.
+				tMin := s - e.maxMatchOff
+				if tMin < 0 {
+					tMin = 0
+				}
+				for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
+					s--
+					offset--
+					l++
+				}
+			}
+			if debugAsserts {
+				if offset >= s {
+					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
+				}
+				if !bytes.Equal(src[s:s+l], src[offset:offset+l]) {
+					panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+				}
+			}
+			cand := match{offset: offset, s: s, length: l, rep: rep}
+			cand.estBits(bitsPerByte)
+			if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+				*m = cand
+			}
 		}
 
-		best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
-		best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
-		best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
+		best := match{s: s, est: highScore}
+		improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
 
 		if canRepeat && best.length < goodEnough {
-			cv32 := uint32(cv >> 8)
-			spp := s + 1
-			best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
-			best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
-			best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
-			if best.length > 0 {
-				cv32 = uint32(cv >> 24)
-				spp += 2
-				best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
-				best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
-				best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
+			if s == nextEmit {
+				// Check repeats straight after a match.
+				improve(&best, s-offset2, s, uint32(cv), 1|4)
+				improve(&best, s-offset3, s, uint32(cv), 2|4)
+				if offset1 > 1 {
+					improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
+				}
+			}
+
+			// If either no match or a non-repeat match, check at + 1
+			if best.rep <= 0 {
+				cv32 := uint32(cv >> 8)
+				spp := s + 1
+				improve(&best, spp-offset1, spp, cv32, 1)
+				improve(&best, spp-offset2, spp, cv32, 2)
+				improve(&best, spp-offset3, spp, cv32, 3)
+				if best.rep < 0 {
+					cv32 = uint32(cv >> 24)
+					spp += 2
+					improve(&best, spp-offset1, spp, cv32, 1)
+					improve(&best, spp-offset2, spp, cv32, 2)
+					improve(&best, spp-offset3, spp, cv32, 3)
+				}
 			}
 		}
 		// Load next and check...
 		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
 		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+		index0 := s + 1
 
 		// Look far ahead, unless we have a really long match already...
 		if best.length < goodEnough {
@@ -249,97 +305,89 @@
 				if s >= sLimit {
 					break encodeLoop
 				}
-				cv = load6432(src, s)
 				continue
 			}
 
-			s++
 			candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
-			cv = load6432(src, s)
-			cv2 := load6432(src, s+1)
+			cv = load6432(src, s+1)
+			cv2 := load6432(src, s+2)
 			candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
 			candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
 
 			// Short at s+1
-			best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+			improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
 			// Long at s+1, s+2
-			best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
-			best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
-			best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
-			best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+			improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
+			improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
+			improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
+			improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
 			if false {
 				// Short at s+3.
 				// Too often worse...
-				best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
+				improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
 			}
-			// See if we can find a better match by checking where the current best ends.
-			// Use that offset to see if we can find a better full match.
-			if sAt := best.s + best.length; sAt < sLimit {
-				nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
-				candidateEnd := e.longTable[nextHashL]
-				if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
-					bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
-					if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
-						bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
+
+			// Start check at a fixed offset to allow for a few mismatches.
+			// For this compression level 2 yields the best results.
+			// We cannot do this if we have already indexed this position.
+			const skipBeginning = 2
+			if best.s > s-skipBeginning {
+				// See if we can find a better match by checking where the current best ends.
+				// Use that offset to see if we can find a better full match.
+				if sAt := best.s + best.length; sAt < sLimit {
+					nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
+					candidateEnd := e.longTable[nextHashL]
+
+					if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
+						improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+						if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
+							improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+						}
 					}
-					best = bestEnd
 				}
 			}
 		}
 
 		if debugAsserts {
+			if best.offset >= best.s {
+				panic(fmt.Sprintf("best.offset >= s: %d >= %d", best.offset, best.s))
+			}
+			if best.s < nextEmit {
+				panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit))
+			}
+			if best.offset < s-e.maxMatchOff {
+				panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff))
+			}
 			if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
 				panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
 			}
 		}
 
 		// We have a match, we can store the forward value
+		s = best.s
 		if best.rep > 0 {
-			s = best.s
 			var seq seq
 			seq.matchLen = uint32(best.length - zstdMinMatch)
+			addLiterals(&seq, best.s)
 
-			// We might be able to match backwards.
-			// Extend as long as we can.
-			start := best.s
-			// We end the search early, so we don't risk 0 literals
-			// and have to do special offset treatment.
-			startLimit := nextEmit + 1
-
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
-			repIndex := best.offset
-			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
-				repIndex--
-				start--
-				seq.matchLen++
-			}
-			addLiterals(&seq, start)
-
-			// rep 0
-			seq.offset = uint32(best.rep)
+			// Repeat. If bit 4 is set, this is a non-lit repeat.
+			seq.offset = uint32(best.rep & 3)
 			if debugSequences {
-				println("repeat sequence", seq, "next s:", s)
+				println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset)
 			}
 			blk.sequences = append(blk.sequences, seq)
 
-			// Index match start+1 (long) -> s - 1
-			index0 := s
+			// Index old s + 1 -> s - 1
 			s = best.s + best.length
-
 			nextEmit = s
-			if s >= sLimit {
-				if debugEncoder {
-					println("repeat ended", s, best.length)
 
-				}
-				break encodeLoop
-			}
 			// Index skipped...
+			end := s
+			if s > sLimit+4 {
+				end = sLimit + 4
+			}
 			off := index0 + e.cur
-			for index0 < s-1 {
+			for index0 < end {
 				cv0 := load6432(src, index0)
 				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -348,19 +396,26 @@
 				off++
 				index0++
 			}
+
 			switch best.rep {
-			case 2:
+			case 2, 4 | 1:
 				offset1, offset2 = offset2, offset1
-			case 3:
+			case 3, 4 | 2:
 				offset1, offset2, offset3 = offset3, offset1, offset2
+			case 4 | 3:
+				offset1, offset2, offset3 = offset1-1, offset1, offset2
 			}
-			cv = load6432(src, s)
+			if s >= sLimit {
+				if debugEncoder {
+					println("repeat ended", s, best.length)
+				}
+				break encodeLoop
+			}
 			continue
 		}
 
 		// A 4-byte match has been found. Update recent offsets.
 		// We'll later see if more than 4 bytes.
-		s = best.s
 		t := best.offset
 		offset1, offset2, offset3 = s-t, offset1, offset2
 
@@ -372,22 +427,9 @@
 			panic("invalid offset")
 		}
 
-		// Extend the n-byte match as long as possible.
-		l := best.length
-
-		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
-		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
-			s--
-			t--
-			l++
-		}
-
 		// Write our sequence
 		var seq seq
+		l := best.length
 		seq.litLen = uint32(s - nextEmit)
 		seq.matchLen = uint32(l - zstdMinMatch)
 		if seq.litLen > 0 {
@@ -400,65 +442,25 @@
 		}
 		blk.sequences = append(blk.sequences, seq)
 		nextEmit = s
-		if s >= sLimit {
-			break encodeLoop
+
+		// Index old s + 1 -> s - 1 or sLimit
+		end := s
+		if s > sLimit-4 {
+			end = sLimit - 4
 		}
 
-		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
-		// every entry
-		for index0 < s-1 {
+		off := index0 + e.cur
+		for index0 < end {
 			cv0 := load6432(src, index0)
 			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
 			index0++
+			off++
 		}
-
-		cv = load6432(src, s)
-		if !canRepeat {
-			continue
-		}
-
-		// Check offset 2
-		for {
-			o2 := s - offset2
-			if load3232(src, o2) != uint32(cv) {
-				// Do regular search
-				break
-			}
-
-			// Store this, since we have it.
-			nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
-			nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
-
-			// We have at least 4 byte match.
-			// No need to check backwards. We come straight from a match
-			l := 4 + e.matchlen(s+4, o2+4, src)
-
-			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
-			e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
-			seq.matchLen = uint32(l) - zstdMinMatch
-			seq.litLen = 0
-
-			// Since litlen is always 0, this is offset 1.
-			seq.offset = 1
-			s += l
-			nextEmit = s
-			if debugSequences {
-				println("sequence", seq, "next s:", s)
-			}
-			blk.sequences = append(blk.sequences, seq)
-
-			// Swap offset 1 and 2.
-			offset1, offset2 = offset2, offset1
-			if s >= sLimit {
-				// Finished
-				break encodeLoop
-			}
-			cv = load6432(src, s)
+		if s >= sLimit {
+			break encodeLoop
 		}
 	}
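
The new "Check RLE first" blocks in the best and better encoders recognize a single-byte run before any table work: matchLen(src[1:], src) covering the whole overlap means every byte equals its predecessor, so the block can be emitted as one literal plus a repeat match. A standalone sketch of the same detection idea (not the library's internal matchLen):

package main

import "fmt"

// isRLE reports whether src is a run of one repeated byte.
// Comparing src against itself shifted by one byte matches over the whole
// overlap exactly when every byte equals the one before it.
func isRLE(src []byte) bool {
	if len(src) < 2 {
		return false
	}
	for i := 1; i < len(src); i++ {
		if src[i] != src[0] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isRLE([]byte("aaaaaaaa"))) // true: emitted as 1 literal + 1 repeat match
	fmt.Println(isRLE([]byte("aaaaaaab"))) // false: goes through the normal match search
}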
 
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index c769f69..84a79fd 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -62,14 +62,10 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
-			for i := range e.longTable[:] {
-				e.longTable[i] = prevEntry{}
-			}
+			e.table = [betterShortTableSize]tableEntry{}
+			e.longTable = [betterLongTableSize]prevEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -106,9 +102,20 @@
 		e.cur = e.maxMatchOff
 		break
 	}
-
+	// Add block to history
 	s := e.addBlock(src)
 	blk.size = len(src)
+
+	// Check RLE first
+	if len(src) > zstdMinMatch {
+		ml := matchLen(src[1:], src)
+		if ml == len(src)-1 {
+			blk.literals = append(blk.literals, src[0])
+			blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+			return
+		}
+	}
+
 	if len(src) < minNonLiteralBlockSize {
 		blk.extraLits = len(src)
 		blk.literals = blk.literals[:len(src)]
@@ -149,7 +156,7 @@
 		var t int32
 		// We allow the encoder to optionally turn off repeat offsets across blocks
 		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
 		for {
 			if debugAsserts && canRepeat && offset1 == 0 {
@@ -166,14 +173,15 @@
 			off := s + e.cur
 			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
 			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+			index0 = s + 1
 
 			if canRepeat {
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 					// Consider history as well.
 					var seq seq
-					lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -202,12 +210,12 @@
 
 					// Index match start+1 (long) -> s - 1
 					index0 := s + repOff
-					s += lenght + repOff
+					s += length + repOff
 
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -233,9 +241,9 @@
 				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
 					// Consider history as well.
 					var seq seq
-					lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -262,12 +270,11 @@
 					}
 					blk.sequences = append(blk.sequences, seq)
 
-					index0 := s + repOff2
-					s += lenght + repOff2
+					s += length + repOff2
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -416,15 +423,23 @@
 
 		// Try to find a better match by searching for a long match at the end of the current best match
 		if s+matched < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// Sweet spot is around 3 bytes, but depends on input.
+			// The skipped bytes are tested when the match is extended backwards,
+			// and are still picked up as part of the match if they also match.
+			const skipBeginning = 3
+
 			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
-			cv := load3232(src, s)
+			s2 := s + skipBeginning
+			cv := load3232(src, s2)
 			candidateL := e.longTable[nextHashL]
-			coffsetL := candidateL.offset - e.cur - matched
-			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+			coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+			if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 				// Found a long match, at least 4 bytes.
-				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 				if matchedNext > matched {
 					t = coffsetL
+					s = s2
 					matched = matchedNext
 					if debugMatches {
 						println("long match at end-of-match")
@@ -434,12 +449,13 @@
 
 			// Check prev long...
 			if true {
-				coffsetL = candidateL.prev - e.cur - matched
-				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+				if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 					// Found a long match, at least 4 bytes.
-					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 					if matchedNext > matched {
 						t = coffsetL
+						s = s2
 						matched = matchedNext
 						if debugMatches {
 							println("prev long match at end-of-match")
@@ -493,15 +509,15 @@
 		}
 
 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			index0 += 2
+			off += 2
 		}
 
 		cv = load6432(src, s)
@@ -578,7 +594,7 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
 			for i := range e.table[:] {
 				e.table[i] = tableEntry{}
@@ -667,7 +683,7 @@
 		var t int32
 		// We allow the encoder to optionally turn off repeat offsets across blocks
 		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
 		for {
 			if debugAsserts && canRepeat && offset1 == 0 {
@@ -686,14 +702,15 @@
 			e.markLongShardDirty(nextHashL)
 			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
 			e.markShortShardDirty(nextHashS)
+			index0 = s + 1
 
 			if canRepeat {
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 					// Consider history as well.
 					var seq seq
-					lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -721,13 +738,12 @@
 					blk.sequences = append(blk.sequences, seq)
 
 					// Index match start+1 (long) -> s - 1
-					index0 := s + repOff
-					s += lenght + repOff
+					s += length + repOff
 
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -756,9 +772,9 @@
 				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
 					// Consider history as well.
 					var seq seq
-					lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -785,12 +801,11 @@
 					}
 					blk.sequences = append(blk.sequences, seq)
 
-					index0 := s + repOff2
-					s += lenght + repOff2
+					s += length + repOff2
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -1019,18 +1034,18 @@
 		}
 
 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.markLongShardDirty(h0)
 			h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
 			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			e.markShortShardDirty(h1)
 			index0 += 2
+			off += 2
 		}
 
 		cv = load6432(src, s)
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 7ff0c64..d36be7b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -44,14 +44,10 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
-			for i := range e.longTable[:] {
-				e.longTable[i] = tableEntry{}
-			}
+			e.table = [dFastShortTableSize]tableEntry{}
+			e.longTable = [dFastLongTableSize]tableEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -142,9 +138,9 @@
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 					// Consider history as well.
 					var seq seq
-					lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -170,11 +166,11 @@
 						println("repeat sequence", seq, "next s:", s)
 					}
 					blk.sequences = append(blk.sequences, seq)
-					s += lenght + repOff
+					s += length + repOff
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -388,7 +384,7 @@
 	)
 
 	// Protect against e.cur wraparound.
-	if e.cur >= bufferReset {
+	if e.cur >= e.bufferReset {
 		for i := range e.table[:] {
 			e.table[i] = tableEntry{}
 		}
@@ -685,7 +681,7 @@
 	}
 
 	// We do not store history, so we must offset e.cur to avoid false matches for next user.
-	if e.cur < bufferReset {
+	if e.cur < e.bufferReset {
 		e.cur += int32(len(src))
 	}
 }
@@ -700,7 +696,7 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
 			for i := range e.table[:] {
 				e.table[i] = tableEntry{}
@@ -802,9 +798,9 @@
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 					// Consider history as well.
 					var seq seq
-					lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -830,11 +826,11 @@
 						println("repeat sequence", seq, "next s:", s)
 					}
 					blk.sequences = append(blk.sequences, seq)
-					s += lenght + repOff
+					s += length + repOff
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -1088,7 +1084,7 @@
 			}
 		}
 		e.lastDictID = d.id
-		e.allDirty = true
+		allDirty = true
 	}
 	// Reset table to initial state
 	e.cur = e.maxMatchOff
@@ -1103,7 +1099,8 @@
 	}
 
 	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
-		copy(e.longTable[:], e.dictLongTable)
+		//copy(e.longTable[:], e.dictLongTable)
+		e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
 		for i := range e.longTableShardDirty {
 			e.longTableShardDirty[i] = false
 		}
@@ -1114,7 +1111,9 @@
 			continue
 		}
 
-		copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		// copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
 		e.longTableShardDirty[i] = false
 	}
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f51ab52..f45a3da 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -43,7 +43,7 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
 			for i := range e.table[:] {
 				e.table[i] = tableEntry{}
@@ -133,8 +133,7 @@
 			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 				// Consider history as well.
 				var seq seq
-				var length int32
-				length = 4 + e.matchlen(s+6, repIndex+4, src)
+				length := 4 + e.matchlen(s+6, repIndex+4, src)
 				seq.matchLen = uint32(length - zstdMinMatch)
 
 				// We might be able to match backwards.
@@ -304,13 +303,13 @@
 		minNonLiteralBlockSize = 1 + 1 + inputMargin
 	)
 	if debugEncoder {
-		if len(src) > maxBlockSize {
+		if len(src) > maxCompressedBlockSize {
 			panic("src too big")
 		}
 	}
 
 	// Protect against e.cur wraparound.
-	if e.cur >= bufferReset {
+	if e.cur >= e.bufferReset {
 		for i := range e.table[:] {
 			e.table[i] = tableEntry{}
 		}
@@ -538,7 +537,7 @@
 		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
 	}
 	// We do not store history, so we must offset e.cur to avoid false matches for next user.
-	if e.cur < bufferReset {
+	if e.cur < e.bufferReset {
 		e.cur += int32(len(src))
 	}
 }
@@ -555,11 +554,9 @@
 		return
 	}
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
+			e.table = [tableSize]tableEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -647,8 +644,7 @@
 			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 				// Consider history as well.
 				var seq seq
-				var length int32
-				length = 4 + e.matchlen(s+6, repIndex+4, src)
+				length := 4 + e.matchlen(s+6, repIndex+4, src)
 
 				seq.matchLen = uint32(length - zstdMinMatch)
 
@@ -833,13 +829,12 @@
 		}
 		if true {
 			end := e.maxMatchOff + int32(len(d.content)) - 8
-			for i := e.maxMatchOff; i < end; i += 3 {
+			for i := e.maxMatchOff; i < end; i += 2 {
 				const hashLog = tableBits
 
 				cv := load6432(d.content, i-e.maxMatchOff)
-				nextHash := hashLen(cv, hashLog, tableFastHashLen)      // 0 -> 5
-				nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen)  // 1 -> 6
-				nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
+				nextHash := hashLen(cv, hashLog, tableFastHashLen)     // 0 -> 6
+				nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
 				e.dictTable[nextHash] = tableEntry{
 					val:    uint32(cv),
 					offset: i,
@@ -848,10 +843,6 @@
 					val:    uint32(cv >> 8),
 					offset: i + 1,
 				}
-				e.dictTable[nextHash2] = tableEntry{
-					val:    uint32(cv >> 16),
-					offset: i + 2,
-				}
 			}
 		}
 		e.lastDictID = d.id
@@ -871,7 +862,8 @@
 	const shardCnt = tableShardCnt
 	const shardSize = tableShardSize
 	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
-		copy(e.table[:], e.dictTable)
+		//copy(e.table[:], e.dictTable)
+		e.table = *(*[tableSize]tableEntry)(e.dictTable)
 		for i := range e.tableShardDirty {
 			e.tableShardDirty[i] = false
 		}
@@ -883,7 +875,8 @@
 			continue
 		}
 
-		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
 		e.tableShardDirty[i] = false
 	}
 	e.allDirty = false
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 7aaaedb..8f8223c 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -6,8 +6,10 @@
 
 import (
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
+	"math"
 	rdebug "runtime/debug"
 	"sync"
 
@@ -148,6 +150,9 @@
 // and write CRC if requested.
 func (e *Encoder) Write(p []byte) (n int, err error) {
 	s := &e.state
+	if s.eofWritten {
+		return 0, ErrEncoderClosed
+	}
 	for len(p) > 0 {
 		if len(p)+len(s.filling) < e.o.blockSize {
 			if e.o.crc {
@@ -201,7 +206,7 @@
 			return nil
 		}
 		if final && len(s.filling) > 0 {
-			s.current = e.EncodeAll(s.filling, s.current[:0])
+			s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
 			var n2 int
 			n2, s.err = s.w.Write(s.current)
 			if s.err != nil {
@@ -226,10 +231,7 @@
 			DictID:        e.o.dict.ID(),
 		}
 
-		dst, err := fh.appendTo(tmp[:0])
-		if err != nil {
-			return err
-		}
+		dst := fh.appendTo(tmp[:0])
 		s.headerWritten = true
 		s.wWg.Wait()
 		var n2 int
@@ -276,23 +278,9 @@
 			s.eofWritten = true
 		}
 
-		err := errIncompressible
-		// If we got the exact same number of literals as input,
-		// assume the literals cannot be compressed.
-		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
-			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-		}
-		switch err {
-		case errIncompressible:
-			if debugEncoder {
-				println("Storing incompressible block as raw")
-			}
-			blk.encodeRaw(src)
-			// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
-		case nil:
-		default:
-			s.err = err
-			return err
+		s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		if s.err != nil {
+			return s.err
 		}
 		_, s.err = s.w.Write(blk.output)
 		s.nWritten += int64(len(blk.output))
@@ -304,6 +292,9 @@
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
 	s.nInput += int64(len(s.current))
 	s.wg.Add(1)
+	if final {
+		s.eofWritten = true
+	}
 	go func(src []byte) {
 		if debugEncoder {
 			println("Adding block,", len(src), "bytes, final:", final)
@@ -319,9 +310,6 @@
 		blk := enc.Block()
 		enc.Encode(blk, src)
 		blk.last = final
-		if final {
-			s.eofWritten = true
-		}
 		// Wait for pending writes.
 		s.wWg.Wait()
 		if s.writeErr != nil {
@@ -342,22 +330,8 @@
 				}
 				s.wWg.Done()
 			}()
-			err := errIncompressible
-			// If we got the exact same number of literals as input,
-			// assume the literals cannot be compressed.
-			if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
-				err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-			}
-			switch err {
-			case errIncompressible:
-				if debugEncoder {
-					println("Storing incompressible block as raw")
-				}
-				blk.encodeRaw(src)
-				// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
-			case nil:
-			default:
-				s.writeErr = err
+			s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+			if s.writeErr != nil {
 				return
 			}
 			_, s.writeErr = s.w.Write(blk.output)
@@ -431,12 +405,20 @@
 	if len(s.filling) > 0 {
 		err := e.nextBlock(false)
 		if err != nil {
+			// Ignore Flush after Close.
+			if errors.Is(s.err, ErrEncoderClosed) {
+				return nil
+			}
 			return err
 		}
 	}
 	s.wg.Wait()
 	s.wWg.Wait()
 	if s.err != nil {
+		// Ignore Flush after Close.
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return s.err
 	}
 	return s.writeErr
@@ -452,6 +434,9 @@
 	}
 	err := e.nextBlock(true)
 	if err != nil {
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return err
 	}
 	if s.frameContentSize > 0 {
@@ -489,6 +474,11 @@
 		}
 		_, s.err = s.w.Write(frame)
 	}
+	if s.err == nil {
+		s.err = ErrEncoderClosed
+		return nil
+	}
+
 	return s.err
 }
 
@@ -499,6 +489,15 @@
 // Data compressed with EncodeAll can be decoded with the Decoder,
 // using either a stream or DecodeAll.
 func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+	e.init.Do(e.initialize)
+	enc := <-e.encoders
+	defer func() {
+		e.encoders <- enc
+	}()
+	return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
 	if len(src) == 0 {
 		if e.o.fullZero {
 			// Add frame header.
@@ -510,7 +509,7 @@
 				Checksum: false,
 				DictID:   0,
 			}
-			dst, _ = fh.appendTo(dst)
+			dst = fh.appendTo(dst)
 
 			// Write raw block as last one only.
 			var blk blockHeader
@@ -521,13 +520,7 @@
 		}
 		return dst
 	}
-	e.init.Do(e.initialize)
-	enc := <-e.encoders
-	defer func() {
-		// Release encoder reference to last block.
-		// If a non-single block is needed the encoder will reset again.
-		e.encoders <- enc
-	}()
+
 	// Use single segments when above minimum window and below window size.
 	single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
 	if e.o.single != nil {
@@ -545,10 +538,7 @@
 	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
 		dst = make([]byte, 0, len(src))
 	}
-	dst, err := fh.appendTo(dst)
-	if err != nil {
-		panic(err)
-	}
+	dst = fh.appendTo(dst)
 
 	// If we can do everything in one block, prefer that.
 	if len(src) <= e.o.blockSize {
@@ -567,25 +557,15 @@
 
 		// If we got the exact same number of literals as input,
 		// assume the literals cannot be compressed.
-		err := errIncompressible
 		oldout := blk.output
-		if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
-			// Output directly to dst
-			blk.output = dst
-			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-		}
+		// Output directly to dst
+		blk.output = dst
 
-		switch err {
-		case errIncompressible:
-			if debugEncoder {
-				println("Storing incompressible block as raw")
-			}
-			dst = blk.encodeRawTo(dst, src)
-		case nil:
-			dst = blk.output
-		default:
+		err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		if err != nil {
 			panic(err)
 		}
+		dst = blk.output
 		blk.output = oldout
 	} else {
 		enc.Reset(e.o.dict, false)
@@ -604,25 +584,11 @@
 			if len(src) == 0 {
 				blk.last = true
 			}
-			err := errIncompressible
-			// If we got the exact same number of literals as input,
-			// assume the literals cannot be compressed.
-			if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
-				err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
-			}
-
-			switch err {
-			case errIncompressible:
-				if debugEncoder {
-					println("Storing incompressible block as raw")
-				}
-				dst = blk.encodeRawTo(dst, todo)
-				blk.popOffsets()
-			case nil:
-				dst = append(dst, blk.output...)
-			default:
+			err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+			if err != nil {
 				panic(err)
 			}
+			dst = append(dst, blk.output...)
 			blk.reset(nil)
 		}
 	}
@@ -632,6 +598,7 @@
 	// Add padding with content from crypto/rand.Reader
 	if e.o.pad > 0 {
 		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		var err error
 		dst, err = skippableFrame(dst, add, rand.Reader)
 		if err != nil {
 			panic(err)
@@ -639,3 +606,37 @@
 	}
 	return dst
 }
+
+// MaxEncodedSize returns the expected maximum
+// size of an encoded block or stream.
+func (e *Encoder) MaxEncodedSize(size int) int {
+	frameHeader := 4 + 2 // magic + frame header & window descriptor
+	if e.o.dict != nil {
+		frameHeader += 4
+	}
+	// Frame content size:
+	if size < 256 {
+		frameHeader++
+	} else if size < 65536+256 {
+		frameHeader += 2
+	} else if size < math.MaxInt32 {
+		frameHeader += 4
+	} else {
+		frameHeader += 8
+	}
+	// Final crc
+	if e.o.crc {
+		frameHeader += 4
+	}
+
+	// Max overhead is 3 bytes/block.
+	// There cannot be 0 blocks.
+	blocks := (size + e.o.blockSize) / e.o.blockSize
+
+	// Combine, add padding.
+	maxSz := frameHeader + 3*blocks + size
+	if e.o.pad > 1 {
+		maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+	}
+	return maxSz
+}
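
A minimal usage sketch (not part of the vendored sources) of the encoder behaviour changed above: writes after `Close` now report `ErrEncoderClosed` (declared elsewhere in this patch), repeated `Flush`/`Close` are ignored, and the new `MaxEncodedSize` can pre-size a destination buffer for `EncodeAll`:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Streaming use: Write, then Close.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// After Close, further writes are rejected, while a second Flush/Close
	// is silently ignored by the logic above.
	if _, err := enc.Write([]byte("more")); errors.Is(err, zstd.ErrEncoderClosed) {
		fmt.Println("write after close rejected")
	}

	// One-shot use: MaxEncodedSize pre-sizes the destination for EncodeAll.
	oneShot, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	payload := []byte("some payload to compress in one call")
	dst := make([]byte, 0, oneShot.MaxEncodedSize(len(payload)))
	dst = oneShot.EncodeAll(payload, dst)
	fmt.Println("compressed", len(payload), "->", len(dst), "bytes")
}
```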
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index a7c5e1a..20671dc 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -3,6 +3,8 @@
 import (
 	"errors"
 	"fmt"
+	"math"
+	"math/bits"
 	"runtime"
 	"strings"
 )
@@ -37,7 +39,7 @@
 		blockSize:     maxCompressedBlockSize,
 		windowSize:    8 << 20,
 		level:         SpeedDefault,
-		allLitEntropy: true,
+		allLitEntropy: false,
 		lowMem:        false,
 	}
 }
@@ -47,22 +49,22 @@
 	switch o.level {
 	case SpeedFastest:
 		if o.dict != nil {
-			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
 		}
-		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
 
 	case SpeedDefault:
 		if o.dict != nil {
-			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
 		}
-		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
 	case SpeedBetterCompression:
 		if o.dict != nil {
-			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
 		}
-		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
 	case SpeedBestCompression:
-		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
 	}
 	panic("unknown compression level")
 }
@@ -92,7 +94,7 @@
 // The value must be a power of two between MinWindowSize and MaxWindowSize.
 // A larger value will enable better compression but allocate more memory and,
 // for above-default values, take considerably longer.
-// The default value is determined by the compression level.
+// The default value is determined by the compression level and max 8MB.
 func WithWindowSize(n int) EOption {
 	return func(o *encoderOptions) error {
 		switch {
@@ -127,7 +129,7 @@
 		}
 		// No need to waste our time.
 		if n == 1 {
-			o.pad = 0
+			n = 0
 		}
 		if n > 1<<30 {
 			return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")
@@ -230,13 +232,13 @@
 			case SpeedDefault:
 				o.windowSize = 8 << 20
 			case SpeedBetterCompression:
-				o.windowSize = 16 << 20
+				o.windowSize = 8 << 20
 			case SpeedBestCompression:
-				o.windowSize = 32 << 20
+				o.windowSize = 8 << 20
 			}
 		}
 		if !o.customALEntropy {
-			o.allLitEntropy = l > SpeedFastest
+			o.allLitEntropy = l > SpeedDefault
 		}
 
 		return nil
@@ -304,7 +306,13 @@
 }
 
 // WithEncoderDict allows to register a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
 // The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
 func WithEncoderDict(dict []byte) EOption {
 	return func(o *encoderOptions) error {
 		d, err := loadDict(dict)
@@ -315,3 +323,17 @@
 		return nil
 	}
 }
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+	return func(o *encoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+		}
+		o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+		return nil
+	}
+}
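
An illustrative sketch (not from the vendored sources) of the new raw-dictionary option combined with existing encoder options; a decoder would need the same content registered through the corresponding decoder option to read the resulting frames:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Arbitrary shared history used as a raw dictionary (not a trained one).
	history := []byte("common prefix shared by many payloads; common prefix shared by many payloads")

	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		zstd.WithWindowSize(1<<20),
		zstd.WithEncoderDictRaw(42, history),
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	frame := enc.EncodeAll([]byte("common prefix shared by many payloads, new tail"), nil)
	fmt.Println("frame of", len(frame), "bytes, dictionary ID 42 recorded in the header")
}
```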
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 9568a4b..e47af66 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -5,7 +5,7 @@
 package zstd
 
 import (
-	"bytes"
+	"encoding/binary"
 	"encoding/hex"
 	"errors"
 	"io"
@@ -29,7 +29,7 @@
 
 	FrameContentSize uint64
 
-	DictionaryID  *uint32
+	DictionaryID  uint32
 	HasCheckSum   bool
 	SingleSegment bool
 }
@@ -43,9 +43,9 @@
 	MaxWindowSize = 1 << 29
 )
 
-var (
-	frameMagic          = []byte{0x28, 0xb5, 0x2f, 0xfd}
-	skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+const (
+	frameMagic          = "\x28\xb5\x2f\xfd"
+	skippableFrameMagic = "\x2a\x4d\x18"
 )
 
 func newFrameDec(o decoderOptions) *frameDec {
@@ -73,25 +73,25 @@
 		switch err {
 		case io.EOF, io.ErrUnexpectedEOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			signature[0] = b[0]
+		default:
+			return err
 		}
 		// Read the rest, don't allow io.ErrUnexpectedEOF
 		b, err = br.readSmall(3)
 		switch err {
 		case io.EOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			copy(signature[1:], b)
+		default:
+			return err
 		}
 
-		if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
+		if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
 			if debugDecoder {
-				println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
+				println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
 			}
 			// Break if not skippable frame.
 			break
@@ -114,9 +114,9 @@
 			return err
 		}
 	}
-	if !bytes.Equal(signature[:], frameMagic) {
+	if string(signature[:]) != frameMagic {
 		if debugDecoder {
-			println("Got magic numbers: ", signature, "want:", frameMagic)
+			println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
 		}
 		return ErrMagicMismatch
 	}
@@ -146,7 +146,9 @@
 			}
 			return err
 		}
-		printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+		if debugDecoder {
+			printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+		}
 		windowLog := 10 + (wd >> 3)
 		windowBase := uint64(1) << windowLog
 		windowAdd := (windowBase / 8) * uint64(wd&0x7)
@@ -155,7 +157,7 @@
 
 	// Read Dictionary_ID
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
-	d.DictionaryID = nil
+	d.DictionaryID = 0
 	if size := fhd & 3; size != 0 {
 		if size == 3 {
 			size = 4
@@ -167,7 +169,7 @@
 			return err
 		}
 		var id uint32
-		switch size {
+		switch len(b) {
 		case 1:
 			id = uint32(b[0])
 		case 2:
@@ -178,11 +180,7 @@
 		if debugDecoder {
 			println("Dict size", size, "ID:", id)
 		}
-		if id > 0 {
-			// ID 0 means "sorry, no dictionary anyway".
-			// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
-			d.DictionaryID = &id
-		}
+		d.DictionaryID = id
 	}
 
 	// Read Frame_Content_Size
@@ -204,7 +202,7 @@
 			println("Reading Frame content", err)
 			return err
 		}
-		switch fcsSize {
+		switch len(b) {
 		case 1:
 			d.FrameContentSize = uint64(b[0])
 		case 2:
@@ -261,11 +259,16 @@
 	}
 	d.history.windowSize = int(d.WindowSize)
 	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
-		// Alloc 2x window size if not low-mem, or very small window size.
+		// Alloc 2x window size if not low-mem, or window size below 2MB.
 		d.history.allocFrameBuffer = d.history.windowSize * 2
 	} else {
-		// Alloc with one additional block
-		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		if d.o.lowMem {
+			// Alloc with 1MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
+		} else {
+			// Alloc with 2MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		}
 	}
 
 	if debugDecoder {
@@ -292,58 +295,41 @@
 	return nil
 }
 
-// checkCRC will check the checksum if the frame has one.
+// checkCRC will check the checksum, assuming the frame has one.
 // Will return ErrCRCMismatch if crc check failed, otherwise nil.
 func (d *frameDec) checkCRC() error {
-	if !d.HasCheckSum {
-		return nil
-	}
-
 	// We can overwrite upper tmp now
-	want, err := d.rawInput.readSmall(4)
+	buf, err := d.rawInput.readSmall(4)
 	if err != nil {
 		println("CRC missing?", err)
 		return err
 	}
 
-	if d.o.ignoreChecksum {
-		return nil
-	}
+	want := binary.LittleEndian.Uint32(buf[:4])
+	got := uint32(d.crc.Sum64())
 
-	var tmp [4]byte
-	got := d.crc.Sum64()
-	// Flip to match file order.
-	tmp[0] = byte(got >> 0)
-	tmp[1] = byte(got >> 8)
-	tmp[2] = byte(got >> 16)
-	tmp[3] = byte(got >> 24)
-
-	if !bytes.Equal(tmp[:], want) {
+	if got != want {
 		if debugDecoder {
-			println("CRC Check Failed:", tmp[:], "!=", want)
+			printf("CRC check failed: got %08x, want %08x\n", got, want)
 		}
 		return ErrCRCMismatch
 	}
 	if debugDecoder {
-		println("CRC ok", tmp[:])
+		printf("CRC ok %08x\n", got)
 	}
 	return nil
 }
 
-// consumeCRC reads the checksum data if the frame has one.
+// consumeCRC skips over the checksum, assuming the frame has one.
 func (d *frameDec) consumeCRC() error {
-	if d.HasCheckSum {
-		_, err := d.rawInput.readSmall(4)
-		if err != nil {
-			println("CRC missing?", err)
-			return err
-		}
+	_, err := d.rawInput.readSmall(4)
+	if err != nil {
+		println("CRC missing?", err)
 	}
-
-	return nil
+	return err
 }
 
-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b
 
@@ -353,12 +339,23 @@
 	// Store input length, so we only check new data.
 	crcStart := len(dst)
 	d.history.decoders.maxSyncLen = 0
+	if d.o.limitToCap {
+		d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+	}
 	if d.FrameContentSize != fcsUnknown {
-		d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+			d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		}
 		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+			if debugDecoder {
+				println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+			}
 			return dst, ErrDecoderSizeExceeded
 		}
-		if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+		if debugDecoder {
+			println("maxSyncLen:", d.history.decoders.maxSyncLen)
+		}
+		if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
 			// Alloc for output
 			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
 			copy(dst2, dst)
@@ -378,7 +375,13 @@
 		if err != nil {
 			break
 		}
-		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+		if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+			println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.o.limitToCap && len(d.history.b) > cap(dst) {
+			println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
 			err = ErrDecoderSizeExceeded
 			break
 		}
@@ -402,15 +405,8 @@
 			if d.o.ignoreChecksum {
 				err = d.consumeCRC()
 			} else {
-				var n int
-				n, err = d.crc.Write(dst[crcStart:])
-				if err == nil {
-					if n != len(dst)-crcStart {
-						err = io.ErrShortWrite
-					} else {
-						err = d.checkCRC()
-					}
-				}
+				d.crc.Write(dst[crcStart:])
+				err = d.checkCRC()
 			}
 		}
 	}
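
For reference, the checksum verified by `checkCRC` above is the low 32 bits of XXH64 (seed 0) over the decoded content, stored little-endian after the final block. A small sketch of that computation using the public xxhash module (assumed available; the vendored copy is internal):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	content := []byte("decoded frame content")

	// Content_Checksum = low 32 bits of XXH64 (seed 0) over the decoded data,
	// stored little-endian after the final block.
	sum := uint32(xxhash.Sum64(content))

	var onWire [4]byte
	binary.LittleEndian.PutUint32(onWire[:], sum)
	fmt.Printf("checksum %08x, wire bytes % x\n", sum, onWire)
}
```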
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index 4ef7f5a..667ca06 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -22,7 +22,7 @@
 
 const maxHeaderSize = 14
 
-func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+func (f frameHeader) appendTo(dst []byte) []byte {
 	dst = append(dst, frameMagic...)
 	var fhd uint8
 	if f.Checksum {
@@ -76,7 +76,7 @@
 		if f.SingleSegment {
 			dst = append(dst, uint8(f.ContentSize))
 		}
-		// Unless SingleSegment is set, framessizes < 256 are nto stored.
+		// Unless SingleSegment is set, framessizes < 256 are not stored.
 	case 1:
 		f.ContentSize -= 256
 		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
@@ -88,7 +88,7 @@
 	default:
 		panic("invalid fcs")
 	}
-	return dst, nil
+	return dst
 }
 
 const skippableFrameHeader = 4 + 4
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
index c881d28..d04a829 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -21,7 +21,8 @@
 
 // buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
 // Function returns non-zero exit code on error.
-// go:noescape
+//
+//go:noescape
 func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 
 // please keep in sync with _generate/gen_fse.go
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
index da32b44..bcde398 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -1,7 +1,6 @@
 // Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
 
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 // func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 TEXT ·buildDtable_asm(SB), $0-24
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
index 332e51f..8adfebb 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -20,10 +20,9 @@
 			if v == -1 {
 				s.dt[highThreshold].setAddBits(uint8(i))
 				highThreshold--
-				symbolNext[i] = 1
-			} else {
-				symbolNext[i] = uint16(v)
+				v = 1
 			}
+			symbolNext[i] = uint16(v)
 		}
 	}
 
@@ -35,10 +34,12 @@
 		for ss, v := range s.norm[:s.symbolLen] {
 			for i := 0; i < int(v); i++ {
 				s.dt[position].setAddBits(uint8(ss))
-				position = (position + step) & tableMask
-				for position > highThreshold {
+				for {
 					// lowprob area
 					position = (position + step) & tableMask
+					if position <= highThreshold {
+						break
+					}
 				}
 			}
 		}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index 28b4015..0916485 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -37,26 +37,23 @@
 	h.ignoreBuffer = 0
 	h.error = false
 	h.recentOffsets = [3]int{1, 4, 8}
-	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
+	h.decoders.freeDecoders()
 	h.decoders = sequenceDecs{br: h.decoders.br}
-	if h.huffTree != nil {
-		if h.dict == nil || h.dict.litEnc != h.huffTree {
-			huffDecoderPool.Put(h.huffTree)
-		}
-	}
+	h.freeHuffDecoder()
 	h.huffTree = nil
 	h.dict = nil
 	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
 }
 
+func (h *history) freeHuffDecoder() {
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+			h.huffTree = nil
+		}
+	}
+}
+
 func (h *history) setDict(dict *dict) {
 	if dict == nil {
 		return
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
index 69aa3bb..777290d 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -2,12 +2,7 @@
 
 VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
 
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
 high-quality hashing algorithm that is much faster than anything in the Go
 standard library.
 
@@ -28,31 +23,49 @@
 func (*Digest) Sum64() uint64
 ```
 
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
 
 ## Benchmarks
 
 Here are some quick benchmarks comparing the pure-Go and assembly
 implementations of Sum64.
 
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
-| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego    | asm       |
+| ---------- | --------- | --------- |
+| 4 B        |  1.3 GB/s |  1.2 GB/s |
+| 16 B       |  2.9 GB/s |  3.5 GB/s |
+| 100 B      |  6.9 GB/s |  8.1 GB/s |
+| 4 KB       | 11.7 GB/s | 16.7 GB/s |
+| 10 MB      | 12.0 GB/s | 17.3 GB/s |
 
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
 
 ```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
 ```
 
 ## Projects using this package
 
 - [InfluxDB](https://github.com/influxdata/influxdb)
 - [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
 - [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
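
An illustrative sketch of the API listed in this README, using the public `github.com/cespare/xxhash/v2` module that the vendored copy mirrors:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Println(xxhash.Sum64([]byte("hello")))
	fmt.Println(xxhash.Sum64String("hello"))

	// Streaming via the Digest type (implements hash.Hash64).
	d := xxhash.New()
	d.WriteString("hel")
	d.WriteString("lo")
	fmt.Println(d.Sum64()) // same value as the one-shot calls above
}
```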
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
index 2c112a0..fc40c82 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -18,19 +18,11 @@
 	prime5 uint64 = 2870177450012600261
 )
 
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
-	prime1v = prime1
-	prime2v = prime2
-	prime3v = prime3
-	prime4v = prime4
-	prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array for the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
 
 // Digest implements hash.Hash64.
 type Digest struct {
@@ -52,10 +44,10 @@
 
 // Reset clears the Digest's state so that it can be reused.
 func (d *Digest) Reset() {
-	d.v1 = prime1v + prime2
+	d.v1 = primes[0] + prime2
 	d.v2 = prime2
 	d.v3 = 0
-	d.v4 = -prime1v
+	d.v4 = -primes[0]
 	d.total = 0
 	d.n = 0
 }
@@ -71,21 +63,23 @@
 	n = len(b)
 	d.total += uint64(n)
 
+	memleft := d.mem[d.n&(len(d.mem)-1):]
+
 	if d.n+n < 32 {
 		// This new data doesn't even fill the current block.
-		copy(d.mem[d.n:], b)
+		copy(memleft, b)
 		d.n += n
 		return
 	}
 
 	if d.n > 0 {
 		// Finish off the partial block.
-		copy(d.mem[d.n:], b)
+		c := copy(memleft, b)
 		d.v1 = round(d.v1, u64(d.mem[0:8]))
 		d.v2 = round(d.v2, u64(d.mem[8:16]))
 		d.v3 = round(d.v3, u64(d.mem[16:24]))
 		d.v4 = round(d.v4, u64(d.mem[24:32]))
-		b = b[32-d.n:]
+		b = b[c:]
 		d.n = 0
 	}
 
@@ -135,21 +129,20 @@
 
 	h += d.total
 
-	i, end := 0, d.n
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(d.mem[i:i+8]))
+	b := d.mem[:d.n&(len(d.mem)-1)]
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
 		h ^= k1
 		h = rol27(h)*prime1 + prime4
 	}
-	if i+4 <= end {
-		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
 		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
 	}
-	for i < end {
-		h ^= uint64(d.mem[i]) * prime5
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
 		h = rol11(h) * prime1
-		i++
 	}
 
 	h ^= h >> 33
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index cea1785..ddb63aa 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -1,3 +1,4 @@
+//go:build !appengine && gc && !purego && !noasm
 // +build !appengine
 // +build gc
 // +build !purego
@@ -5,212 +6,205 @@
 
 #include "textflag.h"
 
-// Register allocation:
-// AX	h
-// SI	pointer to advance through b
-// DX	n
-// BX	loop end
-// R8	v1, k1
-// R9	v2
-// R10	v3
-// R11	v4
-// R12	tmp
-// R13	prime1v
-// R14	prime2v
-// DI	prime4v
+// Registers:
+#define h      AX
+#define d      AX
+#define p      SI // pointer to advance through b
+#define n      DX
+#define end    BX // loop end
+#define v1     R8
+#define v2     R9
+#define v3     R10
+#define v4     R11
+#define x      R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
 
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
-	MOVQ  (SI), R12 \
-	ADDQ  $8, SI    \
-	IMULQ R14, R12  \
-	ADDQ  R12, r    \
-	ROLQ  $31, r    \
-	IMULQ R13, r
+#define round(acc, x) \
+	IMULQ prime2, x   \
+	ADDQ  x, acc      \
+	ROLQ  $31, acc    \
+	IMULQ prime1, acc
 
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
-	IMULQ R14, val \
-	ROLQ  $31, val \
-	IMULQ R13, val \
-	XORQ  val, acc \
-	IMULQ R13, acc \
-	ADDQ  DI, acc
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+	IMULQ prime2, x \
+	ROLQ  $31, x    \
+	IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+	round0(x)         \
+	XORQ  x, acc      \
+	IMULQ prime1, acc \
+	ADDQ  prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop:  \
+	MOVQ +0(p), x  \
+	round(v1, x)   \
+	MOVQ +8(p), x  \
+	round(v2, x)   \
+	MOVQ +16(p), x \
+	round(v3, x)   \
+	MOVQ +24(p), x \
+	round(v4, x)   \
+	ADDQ $32, p    \
+	CMPQ p, end    \
+	JLE  loop
 
 // func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
 	// Load fixed primes.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
-	MOVQ ·prime4v(SB), DI
+	MOVQ ·primes+0(SB), prime1
+	MOVQ ·primes+8(SB), prime2
+	MOVQ ·primes+24(SB), prime4
 
 	// Load slice.
-	MOVQ b_base+0(FP), SI
-	MOVQ b_len+8(FP), DX
-	LEAQ (SI)(DX*1), BX
+	MOVQ b_base+0(FP), p
+	MOVQ b_len+8(FP), n
+	LEAQ (p)(n*1), end
 
 	// The first loop limit will be len(b)-32.
-	SUBQ $32, BX
+	SUBQ $32, end
 
 	// Check whether we have at least one block.
-	CMPQ DX, $32
+	CMPQ n, $32
 	JLT  noBlocks
 
 	// Set up initial state (v1, v2, v3, v4).
-	MOVQ R13, R8
-	ADDQ R14, R8
-	MOVQ R14, R9
-	XORQ R10, R10
-	XORQ R11, R11
-	SUBQ R13, R11
+	MOVQ prime1, v1
+	ADDQ prime2, v1
+	MOVQ prime2, v2
+	XORQ v3, v3
+	XORQ v4, v4
+	SUBQ prime1, v4
 
-	// Loop until SI > BX.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
+	blockLoop()
 
-	CMPQ SI, BX
-	JLE  blockLoop
+	MOVQ v1, h
+	ROLQ $1, h
+	MOVQ v2, x
+	ROLQ $7, x
+	ADDQ x, h
+	MOVQ v3, x
+	ROLQ $12, x
+	ADDQ x, h
+	MOVQ v4, x
+	ROLQ $18, x
+	ADDQ x, h
 
-	MOVQ R8, AX
-	ROLQ $1, AX
-	MOVQ R9, R12
-	ROLQ $7, R12
-	ADDQ R12, AX
-	MOVQ R10, R12
-	ROLQ $12, R12
-	ADDQ R12, AX
-	MOVQ R11, R12
-	ROLQ $18, R12
-	ADDQ R12, AX
-
-	mergeRound(AX, R8)
-	mergeRound(AX, R9)
-	mergeRound(AX, R10)
-	mergeRound(AX, R11)
+	mergeRound(h, v1)
+	mergeRound(h, v2)
+	mergeRound(h, v3)
+	mergeRound(h, v4)
 
 	JMP afterBlocks
 
 noBlocks:
-	MOVQ ·prime5v(SB), AX
+	MOVQ ·primes+32(SB), h
 
 afterBlocks:
-	ADDQ DX, AX
+	ADDQ n, h
 
-	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
-	ADDQ $24, BX
+	ADDQ $24, end
+	CMPQ p, end
+	JG   try4
 
-	CMPQ SI, BX
-	JG   fourByte
+loop8:
+	MOVQ  (p), x
+	ADDQ  $8, p
+	round0(x)
+	XORQ  x, h
+	ROLQ  $27, h
+	IMULQ prime1, h
+	ADDQ  prime4, h
 
-wordLoop:
-	// Calculate k1.
-	MOVQ  (SI), R8
-	ADDQ  $8, SI
-	IMULQ R14, R8
-	ROLQ  $31, R8
-	IMULQ R13, R8
+	CMPQ p, end
+	JLE  loop8
 
-	XORQ  R8, AX
-	ROLQ  $27, AX
-	IMULQ R13, AX
-	ADDQ  DI, AX
+try4:
+	ADDQ $4, end
+	CMPQ p, end
+	JG   try1
 
-	CMPQ SI, BX
-	JLE  wordLoop
+	MOVL  (p), x
+	ADDQ  $4, p
+	IMULQ prime1, x
+	XORQ  x, h
 
-fourByte:
-	ADDQ $4, BX
-	CMPQ SI, BX
-	JG   singles
+	ROLQ  $23, h
+	IMULQ prime2, h
+	ADDQ  ·primes+16(SB), h
 
-	MOVL  (SI), R8
-	ADDQ  $4, SI
-	IMULQ R13, R8
-	XORQ  R8, AX
-
-	ROLQ  $23, AX
-	IMULQ R14, AX
-	ADDQ  ·prime3v(SB), AX
-
-singles:
-	ADDQ $4, BX
-	CMPQ SI, BX
+try1:
+	ADDQ $4, end
+	CMPQ p, end
 	JGE  finalize
 
-singlesLoop:
-	MOVBQZX (SI), R12
-	ADDQ    $1, SI
-	IMULQ   ·prime5v(SB), R12
-	XORQ    R12, AX
+loop1:
+	MOVBQZX (p), x
+	ADDQ    $1, p
+	IMULQ   ·primes+32(SB), x
+	XORQ    x, h
+	ROLQ    $11, h
+	IMULQ   prime1, h
 
-	ROLQ  $11, AX
-	IMULQ R13, AX
-
-	CMPQ SI, BX
-	JL   singlesLoop
+	CMPQ p, end
+	JL   loop1
 
 finalize:
-	MOVQ  AX, R12
-	SHRQ  $33, R12
-	XORQ  R12, AX
-	IMULQ R14, AX
-	MOVQ  AX, R12
-	SHRQ  $29, R12
-	XORQ  R12, AX
-	IMULQ ·prime3v(SB), AX
-	MOVQ  AX, R12
-	SHRQ  $32, R12
-	XORQ  R12, AX
+	MOVQ  h, x
+	SHRQ  $33, x
+	XORQ  x, h
+	IMULQ prime2, h
+	MOVQ  h, x
+	SHRQ  $29, x
+	XORQ  x, h
+	IMULQ ·primes+16(SB), h
+	MOVQ  h, x
+	SHRQ  $32, x
+	XORQ  x, h
 
-	MOVQ AX, ret+24(FP)
+	MOVQ h, ret+24(FP)
 	RET
 
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
 // func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
 	// Load fixed primes needed for round.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
+	MOVQ ·primes+0(SB), prime1
+	MOVQ ·primes+8(SB), prime2
 
 	// Load slice.
-	MOVQ b_base+8(FP), SI
-	MOVQ b_len+16(FP), DX
-	LEAQ (SI)(DX*1), BX
-	SUBQ $32, BX
+	MOVQ b_base+8(FP), p
+	MOVQ b_len+16(FP), n
+	LEAQ (p)(n*1), end
+	SUBQ $32, end
 
 	// Load vN from d.
-	MOVQ d+0(FP), AX
-	MOVQ 0(AX), R8   // v1
-	MOVQ 8(AX), R9   // v2
-	MOVQ 16(AX), R10 // v3
-	MOVQ 24(AX), R11 // v4
+	MOVQ s+0(FP), d
+	MOVQ 0(d), v1
+	MOVQ 8(d), v2
+	MOVQ 16(d), v3
+	MOVQ 24(d), v4
 
 	// We don't need to check the loop condition here; this function is
 	// always called with at least one block of data to process.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
-
-	CMPQ SI, BX
-	JLE  blockLoop
+	blockLoop()
 
 	// Copy vN back to d.
-	MOVQ R8, 0(AX)
-	MOVQ R9, 8(AX)
-	MOVQ R10, 16(AX)
-	MOVQ R11, 24(AX)
+	MOVQ v1, 0(d)
+	MOVQ v2, 8(d)
+	MOVQ v3, 16(d)
+	MOVQ v4, 24(d)
 
-	// The number of bytes written is SI minus the old base pointer.
-	SUBQ b_base+8(FP), SI
-	MOVQ SI, ret+32(FP)
+	// The number of bytes written is p minus the old base pointer.
+	SUBQ b_base+8(FP), p
+	MOVQ p, ret+32(FP)
 
 	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 4d64a17..ae7d4d3 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -1,13 +1,17 @@
-// +build gc,!purego,!noasm
+//go:build !appengine && gc && !purego && !noasm
+// +build !appengine
+// +build gc
+// +build !purego
+// +build !noasm
 
 #include "textflag.h"
 
-// Register allocation.
+// Registers:
 #define digest	R1
-#define h	R2 // Return value.
-#define p	R3 // Input pointer.
-#define len	R4
-#define nblocks	R5 // len / 32.
+#define h	R2 // return value
+#define p	R3 // input pointer
+#define n	R4 // input length
+#define nblocks	R5 // n / 32
 #define prime1	R7
 #define prime2	R8
 #define prime3	R9
@@ -25,60 +29,52 @@
 #define round(acc, x) \
 	MADD prime2, acc, x, acc \
 	ROR  $64-31, acc         \
-	MUL  prime1, acc         \
+	MUL  prime1, acc
 
-// x = round(0, x).
+// round0 performs the operation x = round(0, x).
 #define round0(x) \
 	MUL prime2, x \
 	ROR $64-31, x \
-	MUL prime1, x \
+	MUL prime1, x
 
-#define mergeRound(x) \
-	round0(x)                 \
-	EOR  x, h                 \
-	MADD h, prime4, prime1, h \
+#define mergeRound(acc, x) \
+	round0(x)                     \
+	EOR  x, acc                   \
+	MADD acc, prime4, prime1, acc
 
-// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
-#define blocksLoop() \
-	LSR     $5, len, nblocks \
-	PCALIGN $16              \
-	loop:                    \
-	LDP.P   32(p), (x1, x2)  \
-	round(v1, x1)            \
-	LDP     -16(p), (x3, x4) \
-	round(v2, x2)            \
-	SUB     $1, nblocks      \
-	round(v3, x3)            \
-	round(v4, x4)            \
-	CBNZ    nblocks, loop    \
-
-// The primes are repeated here to ensure that they're stored
-// in a contiguous array, so we can load them with LDP.
-DATA primes<> +0(SB)/8, $11400714785074694791
-DATA primes<> +8(SB)/8, $14029467366897019727
-DATA primes<>+16(SB)/8, $1609587929392839161
-DATA primes<>+24(SB)/8, $9650029242287828579
-DATA primes<>+32(SB)/8, $2870177450012600261
-GLOBL primes<>(SB), NOPTR+RODATA, $40
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+	LSR     $5, n, nblocks  \
+	PCALIGN $16             \
+	loop:                   \
+	LDP.P   16(p), (x1, x2) \
+	LDP.P   16(p), (x3, x4) \
+	round(v1, x1)           \
+	round(v2, x2)           \
+	round(v3, x3)           \
+	round(v4, x4)           \
+	SUB     $1, nblocks     \
+	CBNZ    nblocks, loop
 
 // func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
-	LDP b_base+0(FP), (p, len)
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+	LDP b_base+0(FP), (p, n)
 
-	LDP  primes<> +0(SB), (prime1, prime2)
-	LDP  primes<>+16(SB), (prime3, prime4)
-	MOVD primes<>+32(SB), prime5
+	LDP  ·primes+0(SB), (prime1, prime2)
+	LDP  ·primes+16(SB), (prime3, prime4)
+	MOVD ·primes+32(SB), prime5
 
-	CMP  $32, len
-	CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
-	BLO  afterLoop
+	CMP  $32, n
+	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+	BLT  afterLoop
 
 	ADD  prime1, prime2, v1
 	MOVD prime2, v2
 	MOVD $0, v3
 	NEG  prime1, v4
 
-	blocksLoop()
+	blockLoop()
 
 	ROR $64-1, v1, x1
 	ROR $64-7, v2, x2
@@ -88,71 +84,75 @@
 	ADD x3, x4
 	ADD x2, x4, h
 
-	mergeRound(v1)
-	mergeRound(v2)
-	mergeRound(v3)
-	mergeRound(v4)
+	mergeRound(h, v1)
+	mergeRound(h, v2)
+	mergeRound(h, v3)
+	mergeRound(h, v4)
 
 afterLoop:
-	ADD len, h
+	ADD n, h
 
-	TBZ   $4, len, try8
+	TBZ   $4, n, try8
 	LDP.P 16(p), (x1, x2)
 
 	round0(x1)
+
+	// NOTE: here and below, sequencing the EOR after the ROR (using a
+	// rotated register) is worth a small but measurable speedup for small
+	// inputs.
 	ROR  $64-27, h
 	EOR  x1 @> 64-27, h, h
 	MADD h, prime4, prime1, h
 
 	round0(x2)
 	ROR  $64-27, h
-	EOR  x2 @> 64-27, h
+	EOR  x2 @> 64-27, h, h
 	MADD h, prime4, prime1, h
 
 try8:
-	TBZ    $3, len, try4
+	TBZ    $3, n, try4
 	MOVD.P 8(p), x1
 
 	round0(x1)
 	ROR  $64-27, h
-	EOR  x1 @> 64-27, h
+	EOR  x1 @> 64-27, h, h
 	MADD h, prime4, prime1, h
 
 try4:
-	TBZ     $2, len, try2
+	TBZ     $2, n, try2
 	MOVWU.P 4(p), x2
 
 	MUL  prime1, x2
 	ROR  $64-23, h
-	EOR  x2 @> 64-23, h
+	EOR  x2 @> 64-23, h, h
 	MADD h, prime3, prime2, h
 
 try2:
-	TBZ     $1, len, try1
+	TBZ     $1, n, try1
 	MOVHU.P 2(p), x3
 	AND     $255, x3, x1
 	LSR     $8, x3, x2
 
 	MUL prime5, x1
 	ROR $64-11, h
-	EOR x1 @> 64-11, h
+	EOR x1 @> 64-11, h, h
 	MUL prime1, h
 
 	MUL prime5, x2
 	ROR $64-11, h
-	EOR x2 @> 64-11, h
+	EOR x2 @> 64-11, h, h
 	MUL prime1, h
 
 try1:
-	TBZ   $0, len, end
+	TBZ   $0, n, finalize
 	MOVBU (p), x4
 
 	MUL prime5, x4
 	ROR $64-11, h
-	EOR x4 @> 64-11, h
+	EOR x4 @> 64-11, h, h
 	MUL prime1, h
 
-end:
+finalize:
 	EOR h >> 33, h
 	MUL prime2, h
 	EOR h >> 29, h
@@ -162,25 +162,23 @@
 	MOVD h, ret+24(FP)
 	RET
 
-// func writeBlocks(d *Digest, b []byte) int
-//
-// Assumes len(b) >= 32.
-TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
-	LDP primes<>(SB), (prime1, prime2)
+// func writeBlocks(s *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+	LDP ·primes+0(SB), (prime1, prime2)
 
 	// Load state. Assume v[1-4] are stored contiguously.
-	MOVD d+0(FP), digest
+	MOVD s+0(FP), digest
 	LDP  0(digest), (v1, v2)
 	LDP  16(digest), (v3, v4)
 
-	LDP b_base+8(FP), (p, len)
+	LDP b_base+8(FP), (p, n)
 
-	blocksLoop()
+	blockLoop()
 
 	// Store updated state.
 	STP (v1, v2), 0(digest)
 	STP (v3, v4), 16(digest)
 
-	BIC  $31, len
-	MOVD len, ret+32(FP)
+	BIC  $31, n
+	MOVD n, ret+32(FP)
 	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
index 1a1fac9..d4221ed 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -13,4 +13,4 @@
 func Sum64(b []byte) uint64
 
 //go:noescape
-func writeBlocks(d *Digest, b []byte) int
+func writeBlocks(s *Digest, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index 209cb4a..0be16ce 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -15,10 +15,10 @@
 	var h uint64
 
 	if n >= 32 {
-		v1 := prime1v + prime2
+		v1 := primes[0] + prime2
 		v2 := prime2
 		v3 := uint64(0)
-		v4 := -prime1v
+		v4 := -primes[0]
 		for len(b) >= 32 {
 			v1 = round(v1, u64(b[0:8:len(b)]))
 			v2 = round(v2, u64(b[8:16:len(b)]))
@@ -37,19 +37,18 @@
 
 	h += uint64(n)
 
-	i, end := 0, len(b)
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(b[i:i+8:len(b)]))
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
 		h ^= k1
 		h = rol27(h)*prime1 + prime4
 	}
-	if i+4 <= end {
-		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
 		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
 	}
-	for ; i < end; i++ {
-		h ^= uint64(b[i]) * prime5
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
 		h = rol11(h) * prime1
 	}
 
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
new file mode 100644
index 0000000..f41932b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
@@ -0,0 +1,16 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+//	len(a) <= len(b) and len(a) > 0
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
new file mode 100644
index 0000000..0782b86
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -0,0 +1,66 @@
+// Copied from S2 implementation.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func matchLen(a []byte, b []byte) int
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+	MOVQ a_base+0(FP), AX
+	MOVQ b_base+24(FP), CX
+	MOVQ a_len+8(FP), DX
+
+	// matchLen
+	XORL SI, SI
+	CMPL DX, $0x08
+	JB   matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+	MOVQ (AX)(SI*1), BX
+	XORQ (CX)(SI*1), BX
+	JZ   matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+	TZCNTQ BX, BX
+#else
+	BSFQ BX, BX
+#endif
+	SHRL $0x03, BX
+	LEAL (SI)(BX*1), SI
+	JMP  gen_match_len_end
+
+matchlen_loop_standalone:
+	LEAL -8(DX), DX
+	LEAL 8(SI), SI
+	CMPL DX, $0x08
+	JAE  matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+	CMPL DX, $0x04
+	JB   matchlen_match2_standalone
+	MOVL (AX)(SI*1), BX
+	CMPL (CX)(SI*1), BX
+	JNE  matchlen_match2_standalone
+	LEAL -4(DX), DX
+	LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+	CMPL DX, $0x02
+	JB   matchlen_match1_standalone
+	MOVW (AX)(SI*1), BX
+	CMPW (CX)(SI*1), BX
+	JNE  matchlen_match1_standalone
+	LEAL -2(DX), DX
+	LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+	CMPL DX, $0x01
+	JB   gen_match_len_end
+	MOVB (AX)(SI*1), BL
+	CMPB (CX)(SI*1), BL
+	JNE  gen_match_len_end
+	INCL SI
+
+gen_match_len_end:
+	MOVQ SI, ret+48(FP)
+	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
new file mode 100644
index 0000000..bea1779
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
@@ -0,0 +1,38 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shortest of the two.
+func matchLen(a, b []byte) (n int) {
+	left := len(a)
+	for left >= 8 {
+		diff := le.Load64(a, n) ^ le.Load64(b, n)
+		if diff != 0 {
+			return n + bits.TrailingZeros64(diff)>>3
+		}
+		n += 8
+		left -= 8
+	}
+	a = a[n:]
+	b = b[n:]
+
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+
+}
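
The generic `matchLen` above compares eight bytes at a time and uses trailing-zero counting to locate the first mismatch. A self-contained sketch of the same technique using only the standard library (`commonPrefixLen` is a hypothetical name, not the package's API):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// commonPrefixLen mirrors matchLen: XOR eight bytes at a time and count
// trailing zero bits to find the first differing byte.
// a must not be longer than b.
func commonPrefixLen(a, b []byte) (n int) {
	for len(a)-n >= 8 {
		diff := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for n < len(a) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	fmt.Println(commonPrefixLen([]byte("zstandard!"), []byte("zstandard?"))) // 9
}
```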
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index df04472..9a7de82 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -99,6 +99,21 @@
 	return nil
 }
 
+func (s *sequenceDecs) freeDecoders() {
+	if f := s.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.litLengths.fse = nil
+	}
+	if f := s.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.offsets.fse = nil
+	}
+	if f := s.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.matchLengths.fse = nil
+	}
+}
+
 // execute will execute the decoded sequence with the provided history.
 // The sequence must be evaluated before being sent.
 func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
@@ -221,13 +236,16 @@
 		maxBlockSize = s.windowSize
 	}
 
+	if debugDecoder {
+		println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
+	}
 	for i := seqs - 1; i >= 0; i-- {
 		if br.overread() {
-			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
 			return io.ErrUnexpectedEOF
 		}
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 
@@ -299,7 +317,7 @@
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -409,9 +427,8 @@
 		}
 	}
 
-	// Check if space for literals
-	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 
 	// Add final literals
@@ -435,18 +452,13 @@
 
 	// extra bits are stored in reverse order.
 	br.fill()
-	if s.maxBits <= 32 {
-		mo += br.getBits(moB)
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-	} else {
-		mo += br.getBits(moB)
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
 		br.fill()
-		// matchlength+literal length, max 32 bits
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-
 	}
+	// matchlength+literal length, max 32 bits
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
 	mo = s.adjustOffset(mo, ll, moB)
 	return
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 7598c10..c59f17e 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -5,6 +5,7 @@
 
 import (
 	"fmt"
+	"io"
 
 	"github.com/klauspost/compress/internal/cpuinfo"
 )
@@ -32,18 +33,22 @@
 // sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
@@ -130,20 +135,23 @@
 		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
 			ctx.ll, ctx.litRemain+ctx.ll)
 
+	case errorOverread:
+		return true, io.ErrUnexpectedEOF
+
 	case errorNotEnoughSpace:
 		size := ctx.outPosition + ctx.ll + ctx.ml
 		if debugDecoder {
 			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
 		}
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 
 	default:
-		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
 	}
 
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
@@ -198,23 +206,30 @@
 // error reported when capacity of `out` is too small
 const errorNotEnoughSpace = 5
 
+// error reported when bits are overread.
+const errorOverread = 6
+
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
@@ -239,6 +254,10 @@
 		litRemain: len(s.literals),
 	}
 
+	if debugDecoder {
+		println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
+	}
+
 	s.seqSize = 0
 	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
 	var errCode int
@@ -269,9 +288,11 @@
 		case errorNotEnoughLiterals:
 			ll := ctx.seqs[i].ll
 			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		case errorOverread:
+			return io.ErrUnexpectedEOF
 		}
 
-		return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
 	}
 
 	if ctx.litRemain < 0 {
@@ -281,7 +302,10 @@
 
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	if debugDecoder {
+		println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
 	}
 	err := br.close()
 	if err != nil {
@@ -308,10 +332,12 @@
 // Returns false if a match offset is too big.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
 
 // Same as above, but with safe memcopies
+//
 //go:noescape
 func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
 
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 27e7677..a708ca6 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -1,16 +1,15 @@
 // Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
 
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 40(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    32(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -39,7 +38,7 @@
 
 sequenceDecs_decode_amd64_fill_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decode_amd64_fill_end
+	JLE     sequenceDecs_decode_amd64_fill_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decode_amd64_fill_end
 	SHLQ    $0x08, DX
@@ -50,6 +49,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decode_amd64_fill_byte_by_byte
 
+sequenceDecs_decode_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_amd64_fill_end:
 	// Update offset
 	MOVQ  R9, AX
@@ -106,7 +109,7 @@
 
 sequenceDecs_decode_amd64_fill_2_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decode_amd64_fill_2_end
+	JLE     sequenceDecs_decode_amd64_fill_2_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decode_amd64_fill_2_end
 	SHLQ    $0x08, DX
@@ -117,6 +120,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decode_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decode_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_amd64_fill_2_end:
 	// Update literal length
 	MOVQ  DI, AX
@@ -150,8 +157,7 @@
 
 	// Update Literal Length State
 	MOVBQZX DI, R14
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -170,8 +176,7 @@
 
 	// Update Match Length State
 	MOVBQZX R8, R14
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -190,8 +195,7 @@
 
 	// Update Offset State
 	MOVBQZX R9, R14
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -294,9 +298,9 @@
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
+	MOVQ DX, 24(AX)
 	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ SI, 32(AX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -321,18 +325,19 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
-	// Return with not enough output space error
-	MOVQ $0x00000005, ret+24(FP)
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
 	RET
 
 // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 40(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    32(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -361,7 +366,7 @@
 
 sequenceDecs_decode_56_amd64_fill_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decode_56_amd64_fill_end
+	JLE     sequenceDecs_decode_56_amd64_fill_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decode_56_amd64_fill_end
 	SHLQ    $0x08, DX
@@ -372,6 +377,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decode_56_amd64_fill_byte_by_byte
 
+sequenceDecs_decode_56_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_56_amd64_fill_end:
 	// Update offset
 	MOVQ  R9, AX
@@ -447,8 +456,7 @@
 
 	// Update Literal Length State
 	MOVBQZX DI, R14
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -467,8 +475,7 @@
 
 	// Update Match Length State
 	MOVBQZX R8, R14
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -487,8 +494,7 @@
 
 	// Update Offset State
 	MOVBQZX R9, R14
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -591,9 +597,9 @@
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
+	MOVQ DX, 24(AX)
 	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ SI, 32(AX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -618,18 +624,19 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
-	// Return with not enough output space error
-	MOVQ $0x00000005, ret+24(FP)
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
 	RET
 
 // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 40(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    32(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -658,7 +665,7 @@
 
 sequenceDecs_decode_bmi2_fill_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decode_bmi2_fill_end
+	JLE     sequenceDecs_decode_bmi2_fill_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decode_bmi2_fill_end
 	SHLQ    $0x08, AX
@@ -669,6 +676,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decode_bmi2_fill_byte_by_byte
 
+sequenceDecs_decode_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_bmi2_fill_end:
 	// Update offset
 	MOVQ   $0x00000808, CX
@@ -709,7 +720,7 @@
 
 sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decode_bmi2_fill_2_end
+	JLE     sequenceDecs_decode_bmi2_fill_2_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decode_bmi2_fill_2_end
 	SHLQ    $0x08, AX
@@ -720,6 +731,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decode_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decode_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ   $0x00000808, CX
@@ -751,11 +766,10 @@
 	BZHIQ   R14, R15, R15
 
 	// Update Offset State
-	BZHIQ  R8, R15, CX
-	SHRXQ  R8, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R15, CX
+	SHRXQ R8, R15, R15
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -763,11 +777,10 @@
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R15, CX
-	SHRXQ  DI, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R15, CX
+	SHRXQ DI, R15, R15
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -775,10 +788,9 @@
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R15, CX
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R15, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -871,9 +883,9 @@
 	MOVQ R11, 152(CX)
 	MOVQ R12, 160(CX)
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
+	MOVQ AX, 24(CX)
 	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ BX, 32(CX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -898,18 +910,19 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
-	// Return with not enough output space error
-	MOVQ $0x00000005, ret+24(FP)
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
 	RET
 
 // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 40(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    32(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -938,7 +951,7 @@
 
 sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decode_56_bmi2_fill_end
+	JLE     sequenceDecs_decode_56_bmi2_fill_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decode_56_bmi2_fill_end
 	SHLQ    $0x08, AX
@@ -949,6 +962,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decode_56_bmi2_fill_byte_by_byte
 
+sequenceDecs_decode_56_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_56_bmi2_fill_end:
 	// Update offset
 	MOVQ   $0x00000808, CX
@@ -1006,11 +1023,10 @@
 	BZHIQ   R14, R15, R15
 
 	// Update Offset State
-	BZHIQ  R8, R15, CX
-	SHRXQ  R8, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R15, CX
+	SHRXQ R8, R15, R15
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -1018,11 +1034,10 @@
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R15, CX
-	SHRXQ  DI, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R15, CX
+	SHRXQ DI, R15, R15
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -1030,10 +1045,9 @@
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R15, CX
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R15, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -1126,9 +1140,9 @@
 	MOVQ R11, 152(CX)
 	MOVQ R12, 160(CX)
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
+	MOVQ AX, 24(CX)
 	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ BX, 32(CX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -1153,8 +1167,9 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
-	// Return with not enough output space error
-	MOVQ $0x00000005, ret+24(FP)
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
 	RET
 
 // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
@@ -1390,8 +1405,7 @@
 	MOVQ ctx+0(FP), AX
 	MOVQ DX, 24(AX)
 	MOVQ DI, 104(AX)
-	MOVQ 80(AX), CX
-	SUBQ CX, SI
+	SUBQ 80(AX), SI
 	MOVQ SI, 112(AX)
 	RET
 
@@ -1403,8 +1417,7 @@
 	MOVQ ctx+0(FP), AX
 	MOVQ DX, 24(AX)
 	MOVQ DI, 104(AX)
-	MOVQ 80(AX), CX
-	SUBQ CX, SI
+	SUBQ 80(AX), SI
 	MOVQ SI, 112(AX)
 	RET
 
@@ -1748,8 +1761,7 @@
 	MOVQ ctx+0(FP), AX
 	MOVQ DX, 24(AX)
 	MOVQ DI, 104(AX)
-	MOVQ 80(AX), CX
-	SUBQ CX, SI
+	SUBQ 80(AX), SI
 	MOVQ SI, 112(AX)
 	RET
 
@@ -1761,8 +1773,7 @@
 	MOVQ ctx+0(FP), AX
 	MOVQ DX, 24(AX)
 	MOVQ DI, 104(AX)
-	MOVQ 80(AX), CX
-	SUBQ CX, SI
+	SUBQ 80(AX), SI
 	MOVQ SI, 112(AX)
 	RET
 
@@ -1774,11 +1785,11 @@
 // func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 40(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    32(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -1803,7 +1814,7 @@
 	MOVQ    40(SP), AX
 	ADDQ    AX, 48(SP)
 
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)
 
 	// outBase += outPosition
@@ -1825,7 +1836,7 @@
 
 sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decodeSync_amd64_fill_end
+	JLE     sequenceDecs_decodeSync_amd64_fill_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decodeSync_amd64_fill_end
 	SHLQ    $0x08, DX
@@ -1836,6 +1847,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decodeSync_amd64_fill_byte_by_byte
 
+sequenceDecs_decodeSync_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_amd64_fill_end:
 	// Update offset
 	MOVQ  R9, AX
@@ -1892,7 +1907,7 @@
 
 sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decodeSync_amd64_fill_2_end
+	JLE     sequenceDecs_decodeSync_amd64_fill_2_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decodeSync_amd64_fill_2_end
 	SHLQ    $0x08, DX
@@ -1903,6 +1918,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_amd64_fill_2_end:
 	// Update literal length
 	MOVQ  DI, AX
@@ -1936,8 +1955,7 @@
 
 	// Update Literal Length State
 	MOVBQZX DI, R13
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -1956,8 +1974,7 @@
 
 	// Update Match Length State
 	MOVBQZX R8, R13
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -1976,8 +1993,7 @@
 
 	// Update Offset State
 	MOVBQZX R9, R13
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -2264,9 +2280,9 @@
 
 loop_finished:
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
+	MOVQ DX, 24(AX)
 	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ SI, 32(AX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -2312,6 +2328,11 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
 error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -2326,11 +2347,11 @@
 // func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: BMI, BMI2, CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 40(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    32(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -2355,7 +2376,7 @@
 	MOVQ    40(SP), CX
 	ADDQ    CX, 48(SP)
 
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)
 
 	// outBase += outPosition
@@ -2377,7 +2398,7 @@
 
 sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decodeSync_bmi2_fill_end
+	JLE     sequenceDecs_decodeSync_bmi2_fill_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decodeSync_bmi2_fill_end
 	SHLQ    $0x08, AX
@@ -2388,6 +2409,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
 
+sequenceDecs_decodeSync_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_bmi2_fill_end:
 	// Update offset
 	MOVQ   $0x00000808, CX
@@ -2428,7 +2453,7 @@
 
 sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decodeSync_bmi2_fill_2_end
+	JLE     sequenceDecs_decodeSync_bmi2_fill_2_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decodeSync_bmi2_fill_2_end
 	SHLQ    $0x08, AX
@@ -2439,6 +2464,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ   $0x00000808, CX
@@ -2470,11 +2499,10 @@
 	BZHIQ   R13, R14, R14
 
 	// Update Offset State
-	BZHIQ  R8, R14, CX
-	SHRXQ  R8, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R14, CX
+	SHRXQ R8, R14, R14
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -2482,11 +2510,10 @@
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R14, CX
-	SHRXQ  DI, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R14, CX
+	SHRXQ DI, R14, R14
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -2494,10 +2521,9 @@
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R14, CX
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R14, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -2774,9 +2800,9 @@
 
 loop_finished:
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
+	MOVQ AX, 24(CX)
 	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ BX, 32(CX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -2822,6 +2848,11 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
 error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -2836,11 +2867,11 @@
 // func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 40(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    32(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -2865,7 +2896,7 @@
 	MOVQ    40(SP), AX
 	ADDQ    AX, 48(SP)
 
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)
 
 	// outBase += outPosition
@@ -2887,7 +2918,7 @@
 
 sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decodeSync_safe_amd64_fill_end
+	JLE     sequenceDecs_decodeSync_safe_amd64_fill_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decodeSync_safe_amd64_fill_end
 	SHLQ    $0x08, DX
@@ -2898,6 +2929,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
 
+sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_amd64_fill_end:
 	// Update offset
 	MOVQ  R9, AX
@@ -2954,7 +2989,7 @@
 
 sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decodeSync_safe_amd64_fill_2_end
+	JLE     sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decodeSync_safe_amd64_fill_2_end
 	SHLQ    $0x08, DX
@@ -2965,6 +3000,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_amd64_fill_2_end:
 	// Update literal length
 	MOVQ  DI, AX
@@ -2998,8 +3037,7 @@
 
 	// Update Literal Length State
 	MOVBQZX DI, R13
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3018,8 +3056,7 @@
 
 	// Update Match Length State
 	MOVBQZX R8, R13
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3038,8 +3075,7 @@
 
 	// Update Offset State
 	MOVBQZX R9, R13
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3428,9 +3464,9 @@
 
 loop_finished:
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
+	MOVQ DX, 24(AX)
 	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ SI, 32(AX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -3476,6 +3512,11 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
 error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -3490,11 +3531,11 @@
 // func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: BMI, BMI2, CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 40(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    32(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -3519,7 +3560,7 @@
 	MOVQ    40(SP), CX
 	ADDQ    CX, 48(SP)
 
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)
 
 	// outBase += outPosition
@@ -3541,7 +3582,7 @@
 
 sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_end
+	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_end
 	SHLQ    $0x08, AX
@@ -3552,6 +3593,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
 
+sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_bmi2_fill_end:
 	// Update offset
 	MOVQ   $0x00000808, CX
@@ -3592,7 +3637,7 @@
 
 sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_2_end
 	SHLQ    $0x08, AX
@@ -3603,6 +3648,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ   $0x00000808, CX
@@ -3634,11 +3683,10 @@
 	BZHIQ   R13, R14, R14
 
 	// Update Offset State
-	BZHIQ  R8, R14, CX
-	SHRXQ  R8, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R14, CX
+	SHRXQ R8, R14, R14
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -3646,11 +3694,10 @@
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R14, CX
-	SHRXQ  DI, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R14, CX
+	SHRXQ DI, R14, R14
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -3658,10 +3705,9 @@
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R14, CX
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R14, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -4040,9 +4086,9 @@
 
 loop_finished:
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
+	MOVQ AX, 24(CX)
 	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ BX, 32(CX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -4088,6 +4134,11 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
 error_not_enough_space:
 	MOVQ ctx+16(FP), AX
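
Every new *_fill_check_overread block above has the same shape: once the byte-by-byte refill runs out of input, the bits-read counter (BX or DX depending on the routine) is compared against 64, and anything above that means the decoder consumed bits that were never in the stream. A rough Go rendering of the check, with hypothetical names:

	// Go equivalent of:
	//   CMPQ BX, $0x40
	//   JA   error_overread
	if bitsRead > 64 {
		return errorOverread // surfaced as io.ErrUnexpectedEOF by the Go wrapper
	}
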
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
index c3452bc..7cec219 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -29,7 +29,7 @@
 	}
 	for i := range seqs {
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 
@@ -111,7 +111,7 @@
 		}
 		s.seqSize += ll + ml
 		if s.seqSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		litRemain -= ll
 		if litRemain < 0 {
@@ -149,7 +149,7 @@
 	}
 	s.seqSize += litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
index 8014174..65045ea 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -69,7 +69,6 @@
 func llCode(litLength uint32) uint8 {
 	const llDeltaCode = 19
 	if litLength <= 63 {
-		// Compiler insists on bounds check (Go 1.12)
 		return llCodeTable[litLength&63]
 	}
 	return uint8(highBit(litLength)) + llDeltaCode
@@ -102,7 +101,6 @@
 func mlCode(mlBase uint32) uint8 {
 	const mlDeltaCode = 36
 	if mlBase <= 127 {
-		// Compiler insists on bounds check (Go 1.12)
 		return mlCodeTable[mlBase&127]
 	}
 	return uint8(highBit(mlBase)) + mlDeltaCode
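
The comments removed above referred to an old compiler limitation, but the &63 and &127 masks themselves stay and still act as bounds-check-elimination hints. Condensed from llCode above:

	if litLength <= 63 {
		// the mask proves the index is within [0, 63], so the compiler can
		// drop the runtime bounds check on the table lookup
		return llCodeTable[litLength&63]
	}
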
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index 9e1baad..a17381b 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -95,10 +95,9 @@
 	var written int64
 	var readHeader bool
 	{
-		var header []byte
-		var n int
-		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+		header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
 
+		var n int
 		n, r.err = w.Write(header)
 		if r.err != nil {
 			return written, r.err
@@ -198,7 +197,7 @@
 
 			n, r.err = w.Write(r.block.output)
 			if r.err != nil {
-				return written, err
+				return written, r.err
 			}
 			written += int64(n)
 			continue
@@ -240,7 +239,7 @@
 			}
 			n, r.err = w.Write(r.block.output)
 			if r.err != nil {
-				return written, err
+				return written, r.err
 			}
 			written += int64(n)
 			continue
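
Both hunks above fix the same shadowed-variable slip: the write error is stored in r.err, so returning the stale outer err could report success after a failed write. The corrected shape as a standalone sketch:

	n, r.err = w.Write(r.block.output)
	if r.err != nil {
		return written, r.err // previously returned the unrelated local err
	}
	written += int64(n)
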
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 3eb3f1c..6252b46 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -5,11 +5,11 @@
 
 import (
 	"bytes"
-	"encoding/binary"
 	"errors"
 	"log"
 	"math"
-	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // enable debug printing
@@ -36,9 +36,6 @@
 // zstdMinMatch is the minimum zstd match length.
 const zstdMinMatch = 3
 
-// Reset the buffer offset when reaching this.
-const bufferReset = math.MaxInt32 - MaxWindowSize
-
 // fcsUnknown is used for unknown frame content size.
 const fcsUnknown = math.MaxUint64
 
@@ -75,7 +72,6 @@
 	ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
 
 	// ErrUnknownDictionary is returned if the dictionary ID is unknown.
-	// For the time being dictionaries are not supported.
 	ErrUnknownDictionary = errors.New("unknown dictionary")
 
 	// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
@@ -93,6 +89,10 @@
 	// Close has been called.
 	ErrDecoderClosed = errors.New("decoder used after Close")
 
+	// ErrEncoderClosed will be returned if the Encoder was used after
+	// Close has been called.
+	ErrEncoderClosed = errors.New("encoder used after Close")
+
 	// ErrDecoderNilInput is returned when a nil Reader was provided
 	// and an operation other than Reset/DecodeAll/Close was attempted.
 	ErrDecoderNilInput = errors.New("nil input provided as reader")
@@ -110,38 +110,12 @@
 	}
 }
 
-// matchLen returns the maximum length.
-// a must be the shortest of the two.
-// The function also returns whether all bytes matched.
-func matchLen(a, b []byte) int {
-	b = b[:len(a)]
-	for i := 0; i < len(a)-7; i += 8 {
-		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
-			return i + (bits.TrailingZeros64(diff) >> 3)
-		}
-	}
-
-	checked := (len(a) >> 3) << 3
-	a = a[checked:]
-	b = b[checked:]
-	for i := range a {
-		if a[i] != b[i] {
-			return i + checked
-		}
-	}
-	return len(a) + checked
-}
-
 func load3232(b []byte, i int32) uint32 {
-	return binary.LittleEndian.Uint32(b[i:])
+	return le.Load32(b, i)
 }
 
 func load6432(b []byte, i int32) uint64 {
-	return binary.LittleEndian.Uint64(b[i:])
-}
-
-func load64(b []byte, i int) uint64 {
-	return binary.LittleEndian.Uint64(b[i:])
+	return le.Load64(b, i)
 }
 
 type byter interface {
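
load3232 and load6432 now delegate to the internal le package instead of calling encoding/binary directly. A minimal sketch of what those helpers are assumed to do (the vendored implementation may use faster platform-specific loads):

	package le

	import "encoding/binary"

	// Load32 reads a little-endian uint32 starting at index i.
	func Load32(b []byte, i int32) uint32 { return binary.LittleEndian.Uint32(b[i:]) }

	// Load64 reads a little-endian uint64 starting at index i.
	func Load64(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) }
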
diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE
new file mode 100644
index 0000000..bbc7b89
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/LICENSE
@@ -0,0 +1,31 @@
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile
new file mode 100644
index 0000000..e33ee17
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/Makefile
@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+	gofmt -w *.go
+
+docs:
+	gomake clean
+	godoc ${TARG} > README.txt
diff --git a/vendor/github.com/munnerz/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt
new file mode 100644
index 0000000..7723656
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+    Type, SubType string
+    Q             float32
+    Params        map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+	.hg
diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go
new file mode 100644
index 0000000..1dd1cad
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/autoneg.go
@@ -0,0 +1,189 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package goautoneg
+
+import (
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+	Type, SubType string
+	Q             float64
+	Params        map[string]string
+}
+
+// acceptSlice is defined to implement the sort interface.
+type acceptSlice []Accept
+
+func (slice acceptSlice) Len() int {
+	return len(slice)
+}
+
+func (slice acceptSlice) Less(i, j int) bool {
+	ai, aj := slice[i], slice[j]
+	if ai.Q > aj.Q {
+		return true
+	}
+	if ai.Type != "*" && aj.Type == "*" {
+		return true
+	}
+	if ai.SubType != "*" && aj.SubType == "*" {
+		return true
+	}
+	return false
+}
+
+func (slice acceptSlice) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+func stringTrimSpaceCutset(r rune) bool {
+	return r == ' '
+}
+
+func nextSplitElement(s, sep string) (item string, remaining string) {
+	if index := strings.Index(s, sep); index != -1 {
+		return s[:index], s[index+1:]
+	}
+	return s, ""
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) acceptSlice {
+	partsCount := 0
+	remaining := header
+	for len(remaining) > 0 {
+		partsCount++
+		_, remaining = nextSplitElement(remaining, ",")
+	}
+	accept := make(acceptSlice, 0, partsCount)
+
+	remaining = header
+	var part string
+	for len(remaining) > 0 {
+		part, remaining = nextSplitElement(remaining, ",")
+		part = strings.TrimFunc(part, stringTrimSpaceCutset)
+
+		a := Accept{
+			Q: 1.0,
+		}
+
+		sp, remainingPart := nextSplitElement(part, ";")
+
+		sp0, spRemaining := nextSplitElement(sp, "/")
+		a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset)
+
+		switch {
+		case len(spRemaining) == 0:
+			if a.Type == "*" {
+				a.SubType = "*"
+			} else {
+				continue
+			}
+		default:
+			var sp1 string
+			sp1, spRemaining = nextSplitElement(spRemaining, "/")
+			if len(spRemaining) > 0 {
+				continue
+			}
+			a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+		}
+
+		if len(remainingPart) == 0 {
+			accept = append(accept, a)
+			continue
+		}
+
+		a.Params = make(map[string]string)
+		for len(remainingPart) > 0 {
+			sp, remainingPart = nextSplitElement(remainingPart, ";")
+			sp0, spRemaining = nextSplitElement(sp, "=")
+			if len(spRemaining) == 0 {
+				continue
+			}
+			var sp1 string
+			sp1, spRemaining = nextSplitElement(spRemaining, "=")
+			if len(spRemaining) != 0 {
+				continue
+			}
+			token := strings.TrimFunc(sp0, stringTrimSpaceCutset)
+			if token == "q" {
+				a.Q, _ = strconv.ParseFloat(sp1, 32)
+			} else {
+				a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+			}
+		}
+
+		accept = append(accept, a)
+	}
+
+	sort.Sort(accept)
+	return accept
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+	asp := make([][]string, 0, len(alternatives))
+	for _, ctype := range alternatives {
+		asp = append(asp, strings.SplitN(ctype, "/", 2))
+	}
+	for _, clause := range ParseAccept(header) {
+		for i, ctsp := range asp {
+			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+				content_type = alternatives[i]
+				return
+			}
+			if clause.Type == ctsp[0] && clause.SubType == "*" {
+				content_type = alternatives[i]
+				return
+			}
+			if clause.Type == "*" && clause.SubType == "*" {
+				content_type = alternatives[i]
+				return
+			}
+		}
+	}
+	return
+}
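
A short usage example for the vendored package (values are illustrative):

	package main

	import (
		"fmt"

		"github.com/munnerz/goautoneg"
	)

	func main() {
		// application/json carries the default q of 1.0, text/html only 0.9,
		// so the JSON alternative is selected.
		ct := goautoneg.Negotiate("text/html;q=0.9, application/json",
			[]string{"application/json", "text/html"})
		fmt.Println(ct) // application/json
	}
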
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
index 8cc4e52..8d0fab8 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
@@ -56,7 +56,7 @@
 	logger.Debug(ctx, "creating-new-component-log-controller")
 	componentName := os.Getenv("COMPONENT_NAME")
 	if componentName == "" {
-		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+		return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
 	}
 
 	var defaultLogLevel string
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
index 8f4131e..6b5ce53 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
@@ -44,7 +44,7 @@
 	logger.Debug(ctx, "creating-new-component-log-features-controller")
 	componentName := os.Getenv("COMPONENT_NAME")
 	if componentName == "" {
-		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+		return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
 	}
 
 	tracingStatus := log.GetGlobalLFM().GetTracePublishingStatus()
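
This fix and the identical one in logcontroller.go above follow the standard Go convention that error strings are not capitalized, so they read naturally when wrapped into a longer message. For example (the wrapping message is illustrative):

	base := errors.New("unable to retrieve PoD Component Name from Runtime env")
	err := fmt.Errorf("creating log features controller: %w", base)
	// err.Error() == "creating log features controller: unable to retrieve PoD Component Name from Runtime env"
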
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
index bb99970..8c31295 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
@@ -179,6 +179,21 @@
 	return alive
 }
 
+// KeyExists checks if the specified key exists in kv-store or not
+func (b *Backend) KeyExists(ctx context.Context, key string) (bool, error) {
+	span, ctx := log.CreateChildSpan(ctx, "kvs-key-exists")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "checking-key-exists", log.Fields{"key": key, "path": formattedPath})
+
+	keyExists, err := b.Client.KeyExists(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(err))
+
+	return keyExists, err
+}
+
 // List retrieves one or more items that match the specified key
 func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
 	span, ctx := log.CreateChildSpan(ctx, "kvs-list")
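
A usage sketch for the new helper (the backend value and key path are illustrative):

	exists, err := kvBackend.KeyExists(ctx, "service/voltha/devices/device-1234")
	if err != nil {
		return err
	}
	if !exists {
		// first write for this key; seed the entry with defaults, etc.
	}
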
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
index 85bc5f5..c84eacf 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
@@ -75,6 +75,7 @@
 // Client represents the set of APIs a KV Client must implement
 type Client interface {
 	List(ctx context.Context, key string) (map[string]*KVPair, error)
+	KeyExists(ctx context.Context, key string) (bool, error)
 	Get(ctx context.Context, key string) (*KVPair, error)
 	GetWithPrefix(ctx context.Context, prefixKey string) (map[string]*KVPair, error)
 	GetWithPrefixKeysOnly(ctx context.Context, prefixKey string) ([]string, error)
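
Adding KeyExists to the Client interface means every implementation in this package must grow the method; the etcd and Redis versions appear later in this patch. A common compile-time guard for that, shown as an illustrative addition (not part of this patch), placed inside the kvstore package:

	var _ Client = (*EtcdClient)(nil)
	var _ Client = (*RedisClient)(nil)
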
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
index c5df1d8..b1080ef 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
@@ -25,8 +25,9 @@
 	"time"
 
 	"github.com/opencord/voltha-lib-go/v7/pkg/log"
-	v3Client "go.etcd.io/etcd/clientv3"
-	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	v3rpcTypes "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
 )
 
 const (
@@ -35,15 +36,17 @@
 )
 
 const (
-	defaultMaxPoolCapacity = 1000 // Default size of an Etcd Client pool
-	defaultMaxPoolUsage    = 100  // Maximum concurrent request an Etcd Client is allowed to process
+	defaultMaxPoolCapacity         = 1000            // Default size of an Etcd Client pool
+	defaultMaxPoolUsage            = 100             // Maximum concurrent request an Etcd Client is allowed to process
+	defaultMaxAttempts             = 10              // Default number of attempts to retry an operation
+	defaultOperationContextTimeout = 3 * time.Second // Default context timeout for operations
 )
 
 // EtcdClient represents the Etcd KV store client
 type EtcdClient struct {
 	pool               EtcdClientAllocator
 	watchedChannels    sync.Map
-	watchedClients     map[string]*v3Client.Client
+	watchedClients     map[string]*clientv3.Client
 	watchedClientsLock sync.RWMutex
 }
 
@@ -82,7 +85,7 @@
 	logger.Infow(ctx, "etcd-pool-created", log.Fields{"capacity": capacity, "max-usage": maxUsage})
 
 	return &EtcdClient{pool: pool,
-		watchedClients: make(map[string]*v3Client.Client),
+		watchedClients: make(map[string]*clientv3.Client),
 	}, nil
 }
 
@@ -101,6 +104,25 @@
 	return true
 }
 
+// KeyExists returns a boolean value based on the existence of the key in kv-store. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) KeyExists(ctx context.Context, key string) (bool, error) {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return false, err
+	}
+	defer c.pool.Put(client)
+	resp, err := client.Get(ctx, key, clientv3.WithKeysOnly(), clientv3.WithCountOnly())
+	if err != nil {
+		logger.Error(ctx, err)
+		return false, err
+	}
+	if resp.Count > 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
 // List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
 // wait for a response
 func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
@@ -109,7 +131,7 @@
 		return nil, err
 	}
 	defer c.pool.Put(client)
-	resp, err := client.Get(ctx, key, v3Client.WithPrefix())
+	resp, err := client.Get(ctx, key, clientv3.WithPrefix())
 
 	if err != nil {
 		logger.Error(ctx, err)
@@ -132,35 +154,49 @@
 	defer c.pool.Put(client)
 
 	attempt := 0
-
 startLoop:
 	for {
-		resp, err := client.Get(ctx, key)
+		retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+		resp, err := client.Get(retryCtx, key)
+		cancel()
+		if attempt >= defaultMaxAttempts {
+			logger.Warnw(ctx, "get-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+			return nil, err
+		}
 		if err != nil {
 			switch err {
 			case context.Canceled:
+				// Check if the parent context was cancelled; if so, don't retry
+				if ctx.Err() != nil {
+					logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+					return nil, err
+				}
+				// Otherwise retry
 				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
 			case context.DeadlineExceeded:
-				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
 			case v3rpcTypes.ErrEmptyKey:
 				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+				return nil, err
 			case v3rpcTypes.ErrLeaderChanged,
 				v3rpcTypes.ErrGRPCNoLeader,
 				v3rpcTypes.ErrTimeout,
 				v3rpcTypes.ErrTimeoutDueToLeaderFail,
 				v3rpcTypes.ErrTimeoutDueToConnectionLost:
 				// Retry for these server errors
-				attempt += 1
-				if er := backoff(ctx, attempt); er != nil {
-					logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
-					return nil, err
-				}
-				logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
-				goto startLoop
-			default:
 				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			default:
+				logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
 			}
-			return nil, err
+
+			// Common retry logic for all error cases
+			attempt++
+			if er := backoff(ctx, attempt); er != nil {
+				logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+				return nil, err
+			}
+			logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
+			goto startLoop
 		}
 
 		for _, ev := range resp.Kvs {
@@ -182,7 +218,7 @@
 	defer c.pool.Put(client)
 
 	// Fetch keys with the prefix
-	resp, err := client.Get(ctx, prefixKey, v3Client.WithPrefix())
+	resp, err := client.Get(ctx, prefixKey, clientv3.WithPrefix())
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch entries for prefix %s: %w", prefixKey, err)
 	}
@@ -208,7 +244,7 @@
 	defer c.pool.Put(client)
 
 	// Fetch keys with the prefix
-	resp, err := client.Get(ctx, prefixKey, v3Client.WithPrefix(), v3Client.WithKeysOnly())
+	resp, err := client.Get(ctx, prefixKey, clientv3.WithPrefix(), clientv3.WithKeysOnly())
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch entries for prefix %s: %w", prefixKey, err)
 	}
@@ -226,7 +262,6 @@
 // accepts only a string as a value for a put operation. Timeout defines how long the function will
 // wait for a response
 func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
-
 	// Validate that we can convert value to a string as etcd API expects a string
 	var val string
 	var err error
@@ -243,32 +278,47 @@
 	attempt := 0
 startLoop:
 	for {
-		_, err = client.Put(ctx, key, val)
+		retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+		_, err = client.Put(retryCtx, key, val)
+		cancel()
+		if attempt >= defaultMaxAttempts {
+			logger.Warnw(ctx, "put-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+			return err
+		}
 		if err != nil {
 			switch err {
 			case context.Canceled:
+				// Check if the parent context was cancelled; if so, don't retry
+				if ctx.Err() != nil {
+					logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+					return err
+				}
+				// Otherwise retry
 				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
 			case context.DeadlineExceeded:
-				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
 			case v3rpcTypes.ErrEmptyKey:
 				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+				return err
 			case v3rpcTypes.ErrLeaderChanged,
 				v3rpcTypes.ErrGRPCNoLeader,
 				v3rpcTypes.ErrTimeout,
 				v3rpcTypes.ErrTimeoutDueToLeaderFail,
 				v3rpcTypes.ErrTimeoutDueToConnectionLost:
 				// Retry for these server errors
-				attempt += 1
-				if er := backoff(ctx, attempt); er != nil {
-					logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
-					return err
-				}
-				logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
-				goto startLoop
-			default:
 				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			default:
+				logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
 			}
-			return err
+
+			// Common retry logic for all error cases
+			attempt++
+			if er := backoff(ctx, attempt); er != nil {
+				logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+				return err
+			}
+			logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
+			goto startLoop
 		}
 		return nil
 	}
@@ -286,32 +336,47 @@
 	attempt := 0
 startLoop:
 	for {
-		_, err = client.Delete(ctx, key)
+		retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+		_, err = client.Delete(retryCtx, key)
+		cancel()
+		if attempt >= defaultMaxAttempts {
+			logger.Warnw(ctx, "delete-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+			return err
+		}
 		if err != nil {
 			switch err {
 			case context.Canceled:
+				// Check if the parent context was cancelled; if so, don't retry
+				if ctx.Err() != nil {
+					logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+					return err
+				}
+				// Otherwise retry
 				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
 			case context.DeadlineExceeded:
-				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
 			case v3rpcTypes.ErrEmptyKey:
 				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+				return err
 			case v3rpcTypes.ErrLeaderChanged,
 				v3rpcTypes.ErrGRPCNoLeader,
 				v3rpcTypes.ErrTimeout,
 				v3rpcTypes.ErrTimeoutDueToLeaderFail,
 				v3rpcTypes.ErrTimeoutDueToConnectionLost:
 				// Retry for these server errors
-				attempt += 1
-				if er := backoff(ctx, attempt); er != nil {
-					logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
-					return err
-				}
-				logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
-				goto startLoop
-			default:
 				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			default:
+				logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
 			}
-			return err
+
+			// Common retry logic for all error cases
+			attempt++
+			if er := backoff(ctx, attempt); er != nil {
+				logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+				return err
+			}
+			logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
+			goto startLoop
 		}
 		logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
 		return nil
@@ -327,7 +392,7 @@
 	defer c.pool.Put(client)
 
 	//delete the prefix
-	if _, err := client.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+	if _, err := client.Delete(ctx, prefixKey, clientv3.WithPrefix()); err != nil {
 		logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
 		return err
 	}
@@ -353,11 +418,11 @@
 	}
 	c.watchedClientsLock.Unlock()
 
-	w := v3Client.NewWatcher(client)
+	w := clientv3.NewWatcher(client)
 	ctx, cancel := context.WithCancel(ctx)
-	var channel v3Client.WatchChan
+	var channel clientv3.WatchChan
 	if withPrefix {
-		channel = w.Watch(ctx, key, v3Client.WithPrefix())
+		channel = w.Watch(ctx, key, clientv3.WithPrefix())
 	} else {
 		channel = w.Watch(ctx, key)
 	}
@@ -366,7 +431,7 @@
 	ch := make(chan *Event, maxClientChannelBufferSize)
 
 	// Keep track of the created channels so they can be closed when required
-	channelMap := make(map[chan *Event]v3Client.Watcher)
+	channelMap := make(map[chan *Event]clientv3.Watcher)
 	channelMap[ch] = w
 	channelMaps := c.addChannelMap(key, channelMap)
 
@@ -380,33 +445,33 @@
 
 }
 
-func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]clientv3.Watcher) []map[chan *Event]clientv3.Watcher {
 	var channels interface{}
 	var exists bool
 
 	if channels, exists = c.watchedChannels.Load(key); exists {
-		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+		channels = append(channels.([]map[chan *Event]clientv3.Watcher), channelMap)
 	} else {
-		channels = []map[chan *Event]v3Client.Watcher{channelMap}
+		channels = []map[chan *Event]clientv3.Watcher{channelMap}
 	}
 	c.watchedChannels.Store(key, channels)
 
-	return channels.([]map[chan *Event]v3Client.Watcher)
+	return channels.([]map[chan *Event]clientv3.Watcher)
 }
 
-func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]clientv3.Watcher {
 	var channels interface{}
 	var exists bool
 
 	if channels, exists = c.watchedChannels.Load(key); exists {
-		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+		channels = append(channels.([]map[chan *Event]clientv3.Watcher)[:pos], channels.([]map[chan *Event]clientv3.Watcher)[pos+1:]...)
 		c.watchedChannels.Store(key, channels)
 	}
 
-	return channels.([]map[chan *Event]v3Client.Watcher)
+	return channels.([]map[chan *Event]clientv3.Watcher)
 }
 
-func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]clientv3.Watcher, bool) {
 	var channels interface{}
 	var exists bool
 
@@ -416,14 +481,14 @@
 		return nil, exists
 	}
 
-	return channels.([]map[chan *Event]v3Client.Watcher), exists
+	return channels.([]map[chan *Event]clientv3.Watcher), exists
 }
 
 // CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
 // may be multiple listeners on the same key.  The previously created channel serves as a key
 func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
 	// Get the array of channels mapping
-	var watchedChannels []map[chan *Event]v3Client.Watcher
+	var watchedChannels []map[chan *Event]clientv3.Watcher
 	var ok bool
 
 	if watchedChannels, ok = c.getChannelMaps(key); !ok {
@@ -463,7 +528,7 @@
 	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
 }
 
-func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel clientv3.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
 	logger.Debug(ctx, "start-listening-on-channel ...")
 	defer cancel()
 	defer close(ch)
@@ -475,11 +540,11 @@
 	logger.Debug(ctx, "stop-listening-on-channel ...")
 }
 
-func getEventType(event *v3Client.Event) int {
+func getEventType(event *clientv3.Event) int {
 	switch event.Type {
-	case v3Client.EventTypePut:
+	case clientv3.EventTypePut:
 		return PUT
-	case v3Client.EventTypeDelete:
+	case clientv3.EventTypeDelete:
 		return DELETE
 	}
 	return UNKNOWN
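
The Get, Put and Delete changes above all share the same retry scaffolding: each attempt runs under its own 3-second context (defaultOperationContextTimeout), retries are capped at defaultMaxAttempts, parent-context cancellation stops retrying immediately, and backoff() paces the attempts. A condensed sketch of that shape (simplified; the real code also classifies specific etcd server errors before deciding to retry or bail out):

	attempt := 0
	for {
		retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
		_, err := client.Put(retryCtx, key, val)
		cancel()
		if err == nil {
			return nil
		}
		if ctx.Err() != nil || attempt >= defaultMaxAttempts {
			return err // parent cancelled, or retries exhausted
		}
		attempt++
		if er := backoff(ctx, attempt); er != nil {
			return err
		}
	}
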
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
index 6c7c838..68095c4 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
@@ -23,7 +23,7 @@
 	"time"
 
 	"github.com/opencord/voltha-lib-go/v7/pkg/log"
-	"go.etcd.io/etcd/clientv3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 )
 
 // EtcdClientAllocator represents a generic interface to allocate an Etcd Client
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
index 7ed4159..3a7e512 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
@@ -120,6 +120,19 @@
 	return allkeys, nil
 }
 
+func (c *RedisClient) KeyExists(ctx context.Context, key string) (bool, error) {
+	var err error
+	var keyCount int64
+
+	if keyCount, err = c.redisAPI.Exists(ctx, key).Result(); err != nil {
+		return false, err
+	}
+	if keyCount > 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
 func (c *RedisClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
 	var err error
 	var keys []string
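
The new RedisClient.KeyExists above is a thin wrapper over the Redis EXISTS command, which returns the number of matching keys. A hedged usage sketch against go-redis directly; the v8 import path and the address are assumptions for illustration, not taken from this patch.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	// Placeholder address; adjust for a real deployment.
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// EXISTS returns how many of the given keys exist, so > 0 means "present",
	// which is exactly the check KeyExists performs.
	n, err := rdb.Exists(ctx, "service/voltha/devices/1234").Result()
	if err != nil {
		fmt.Println("redis error:", err)
		return
	}
	fmt.Println("key exists:", n > 0)
}
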
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
index a265392..1eb2409 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
@@ -128,8 +128,8 @@
 /* Send out device events with key*/
 func (ep *EventProxy) SendDeviceEventWithKey(ctx context.Context, deviceEvent *voltha.DeviceEvent, category eventif.EventCategory, subCategory eventif.EventSubCategory, raisedTs int64, key string) error {
 	if deviceEvent == nil {
-		logger.Error(ctx, "Recieved empty device event")
-		return errors.New("Device event nil")
+		logger.Error(ctx, "received empty device event")
+		return errors.New("device event nil")
 	}
 	var event voltha.Event
 	var de voltha.Event_DeviceEvent
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
index 04c248c..8a6333f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+//nolint:staticcheck
 package flows
 
 import (
@@ -347,7 +348,14 @@
 	if flow == nil {
 		return nil
 	}
-	nFlow := (proto.Clone(flow)).(*ofp.OfpFlowStats)
+	flowData, err := proto.Marshal(flow)
+	if err != nil {
+		return nil // marshal failed; no copy can be made, so return nil as in the flow == nil case
+	}
+	nFlow := &ofp.OfpFlowStats{}
+	if err := proto.Unmarshal(flowData, nFlow); err != nil {
+		return nil // unmarshal failed; give up on the copy and return nil
+	}
 	nFlow.Instructions = nil
 	nInsts := make([]*ofp.OfpInstruction, 0)
 	for _, instruction := range flow.Instructions {
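
The hunk above replaces proto.Clone with a Marshal/Unmarshal round trip to deep-copy an OfpFlowStats. A small standalone sketch of that pattern using the same github.com/golang/protobuf API the file imports; the wrapper message type here is only an illustration so the example stays self-contained.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

// cloneViaWire deep-copies a proto message by marshalling it and
// unmarshalling into dst, which must be an empty message of the same type.
func cloneViaWire(src, dst proto.Message) error {
	data, err := proto.Marshal(src)
	if err != nil {
		return err
	}
	return proto.Unmarshal(data, dst)
}

func main() {
	orig := &wrappers.StringValue{Value: "flow-1"}
	copyMsg := &wrappers.StringValue{}
	if err := cloneViaWire(orig, copyMsg); err != nil {
		panic(err)
	}
	copyMsg.Value = "flow-2"               // mutating the copy...
	fmt.Println(orig.Value, copyMsg.Value) // ...leaves the original intact: "flow-1 flow-2"
}
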
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
index e8a0832..6d8a93c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
@@ -25,6 +25,7 @@
 
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"github.com/jhump/protoreflect/dynamic/grpcdynamic"
 	"github.com/jhump/protoreflect/grpcreflect"
 	"github.com/opencord/voltha-lib-go/v7/pkg/log"
@@ -60,6 +61,12 @@
 )
 
 const (
+	// [VOL-5434] Setting max receive message size to 20 MB,
+	// Default value of 'defaultServerMaxReceiveMessageSize' is 4 MB
+	grpcRecvMsgSizeLimit = 20
+)
+
+const (
 	eventConnecting = event(iota)
 	eventValidatingConnection
 	eventConnected
@@ -605,15 +612,21 @@
 	// 1. automatically inject
 	// 2. publish Open Tracing Spans by this GRPC Client
 	// 3. detect connection failure on client calls such that the reconnection process can begin
-	interceptor_opts := []grpc.UnaryClientInterceptor{grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{}))}
+	interceptor_opts := []grpc.UnaryClientInterceptor{
+		grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+		grpc_prometheus.UnaryClientInterceptor,
+	}
 
+	grpc_prometheus.EnableClientHandlingTimeHistogram()
 	if len(retry_interceptor) > 0 {
 		interceptor_opts = append(interceptor_opts, retry_interceptor...)
 	}
 	conn, err := grpc.Dial(c.serverEndPoint,
 		grpc.WithInsecure(),
+		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcRecvMsgSizeLimit*1024*1024)),
 		grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(
 			grpc_opentracing.StreamClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+			grpc_prometheus.StreamClientInterceptor,
 		)),
 		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(interceptor_opts...)),
 	)
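
The client hunk above raises the maximum receive message size to 20 MB (the library default is 4 MB) and chains the go-grpc-prometheus interceptors next to the opentracing ones. A minimal sketch of the same dial options in isolation; the package name and endpoint are placeholders, and the deprecated grpc.WithInsecure is kept only to mirror the surrounding code.

package grpcutil

import (
	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

const maxRecvMB = 20 // mirrors grpcRecvMsgSizeLimit in the patch

func dial(endpoint string) (*grpc.ClientConn, error) {
	// Export client-side latency histograms, as the patch enables.
	grpc_prometheus.EnableClientHandlingTimeHistogram()

	return grpc.Dial(endpoint,
		grpc.WithInsecure(), // matches the existing insecure dial in client.go
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxRecvMB*1024*1024)),
		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
			grpc_prometheus.UnaryClientInterceptor,
		)),
		grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(
			grpc_prometheus.StreamClientInterceptor,
		)),
	)
}
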
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
index c2f4aed..815990c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
@@ -94,7 +94,7 @@
 /*
 Start prepares the GRPC server and starts servicing requests
 */
-func (s *GrpcServer) Start(ctx context.Context) {
+func (s *GrpcServer) Start(ctx context.Context, opts ...grpc.ServerOption) {
 
 	lis, err := net.Listen("tcp", s.address)
 	if err != nil {
@@ -118,10 +118,10 @@
 		}
 
 		serverOptions = append(serverOptions, grpc.Creds(creds))
-		s.gs = grpc.NewServer(serverOptions...)
+		s.gs = grpc.NewServer(append(serverOptions, opts...)...)
 	} else {
 		logger.Info(ctx, "starting-insecure-grpc-server")
-		s.gs = grpc.NewServer(serverOptions...)
+		s.gs = grpc.NewServer(append(serverOptions, opts...)...)
 	}
 
 	// Register all required services
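
With the change above, Start accepts extra grpc.ServerOption values that are appended after the server's built-in ones. A hedged caller sketch of the new signature; constructing the *GrpcServer itself is out of scope here and the specific options shown are only examples of what a component might now pass.

package example

import (
	"context"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
	"google.golang.org/grpc"
)

// startWithOptions shows the variadic form of Start: caller-supplied options
// are appended to the defaults before grpc.NewServer is invoked.
func startWithOptions(ctx context.Context, s *vgrpc.GrpcServer) {
	go s.Start(ctx,
		grpc.MaxRecvMsgSize(20*1024*1024), // raise the 4 MB server-side default
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
	)
}
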
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
index b3e20a3..77c529b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
@@ -18,6 +18,7 @@
 * release 2.9.  It is no longer used for inter voltha container
 * communication.
  */
+//nolint:staticcheck
 package kafka
 
 import (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
index 99168dc..75025f2 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
@@ -13,6 +13,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
  */
+//nolint:staticcheck
 package kafka
 
 import (
@@ -23,8 +24,7 @@
 	"sync"
 	"time"
 
-	"github.com/Shopify/sarama"
-	scc "github.com/bsm/sarama-cluster"
+	"github.com/IBM/sarama"
 	"github.com/eapache/go-resiliency/breaker"
 	"github.com/golang/protobuf/proto"
 	"github.com/google/uuid"
@@ -48,7 +48,7 @@
 	KafkaAddress                  string
 	producer                      sarama.AsyncProducer
 	consumer                      sarama.Consumer
-	groupConsumers                map[string]*scc.Consumer
+	groupConsumers                map[string]sarama.ConsumerGroup
 	lockOfGroupConsumers          sync.RWMutex
 	consumerGroupPrefix           string
 	consumerType                  int
@@ -217,7 +217,7 @@
 		option(client)
 	}
 
-	client.groupConsumers = make(map[string]*scc.Consumer)
+	client.groupConsumers = make(map[string]sarama.ConsumerGroup)
 
 	client.lockTopicToConsumerChannelMap = sync.RWMutex{}
 	client.topicLockMap = make(map[string]*sync.RWMutex)
@@ -775,7 +775,7 @@
 					err = errTemp
 				}
 			}
-		} else if groupConsumer, ok := consumer.(*scc.Consumer); ok {
+		} else if groupConsumer, ok := consumer.(sarama.ConsumerGroup); ok {
 			if errTemp := groupConsumer.Close(); errTemp != nil {
 				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
 					// This can occur on race condition
@@ -878,33 +878,28 @@
 }
 
 // createGroupConsumer creates a consumer group
-func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
-	config := scc.NewConfig()
+func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (sarama.ConsumerGroup, error) {
+	config := sarama.NewConfig()
 	config.Version = sarama.V1_0_0_0
 	config.ClientID = uuid.New().String()
-	config.Group.Mode = scc.ConsumerModeMultiplex
+	config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
 	config.Consumer.Group.Heartbeat.Interval, _ = time.ParseDuration("1s")
 	config.Consumer.Return.Errors = true
-	//config.Group.Return.Notifications = false
-	//config.Consumer.MaxWaitTime = time.Duration(DefaultConsumerMaxwait) * time.Millisecond
-	//config.Consumer.MaxProcessingTime = time.Duration(DefaultMaxProcessingTime) * time.Millisecond
 	config.Consumer.Offsets.Initial = initialOffset
-	//config.Consumer.Offsets.Initial = sarama.OffsetOldest
+
 	brokers := []string{sc.KafkaAddress}
-
-	topics := []string{topic.Name}
-	var consumer *scc.Consumer
+	// topics := []string{topic.Name}
+	var consumerGroup sarama.ConsumerGroup
 	var err error
-
-	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
-		logger.Errorw(ctx, "create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+	if consumerGroup, err = sarama.NewConsumerGroup(brokers, groupId, config); err != nil {
+		logger.Errorw(ctx, "create-group-consumer-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 		return nil, err
 	}
-	logger.Debugw(ctx, "create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
 
-	//sc.groupConsumers[topic.Name] = consumer
-	sc.addToGroupConsumers(topic.Name, consumer)
-	return consumer, nil
+	logger.Debugw(ctx, "create-group-consumer-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+	//sc.groupConsumers[topic.Name] = consumerGroup
+	sc.addToGroupConsumers(topic.Name, consumerGroup)
+	return consumerGroup, nil
 }
 
 // dispatchToConsumers sends the inter-container message received on a given topic to all subscribers for that
@@ -963,48 +958,59 @@
 	sc.setUnhealthy(ctx)
 }
 
-func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumerGroup sarama.ConsumerGroup, consumerChnls *consumerChannels) {
 	logger.Debugw(ctx, "starting-group-consumption-loop", log.Fields{"topic": topic.Name})
 
-startloop:
-	for {
-		select {
-		case err, ok := <-consumer.Errors():
-			if ok {
+	go func() {
+		for {
+			err := consumerGroup.Consume(ctx, []string{topic.Name}, &groupConsumerHandler{
+				consumerChnls: consumerChnls,
+				sc:            sc,
+				topic:         topic,
+			})
+			if err != nil {
+				logger.Warnw(ctx, "group-consumer-error", log.Fields{"topic": topic.Name, "error": err})
 				if sc.isLivenessError(ctx, err) {
 					sc.updateLiveness(ctx, false)
 				}
-				logger.Warnw(ctx, "group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
-			} else {
-				logger.Warnw(ctx, "group-consumers-closed-err", log.Fields{"topic": topic.Name})
-				// channel is closed
-				break startloop
 			}
-		case msg, ok := <-consumer.Messages():
-			if !ok {
-				logger.Warnw(ctx, "group-consumers-closed-msg", log.Fields{"topic": topic.Name})
-				// Channel closed
-				break startloop
+			select {
+			case <-sc.doneCh:
+				logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
+				return
+			default:
 			}
-			sc.updateLiveness(ctx, true)
-			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
-			msgBody := msg.Value
-			var protoMsg proto.Message
-			if err := proto.Unmarshal(msgBody, protoMsg); err != nil {
-				logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
-				continue
-			}
-			go sc.dispatchToConsumers(consumerChnls, protoMsg, msg.Topic, msg.Timestamp)
-			consumer.MarkOffset(msg, "")
-		case ntf := <-consumer.Notifications():
-			logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
-		case <-sc.doneCh:
-			logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
-			break startloop
 		}
+	}()
+}
+
+type groupConsumerHandler struct {
+	consumerChnls *consumerChannels
+	sc            *SaramaClient
+	topic         *Topic
+}
+
+func (h *groupConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error {
+	return nil
+}
+
+func (h *groupConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error {
+	return nil
+}
+
+func (h *groupConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+	for msg := range claim.Messages() {
+		h.sc.updateLiveness(context.Background(), true)
+		logger.Debugw(context.Background(), "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+		var protoMsg proto.Message
+		if err := proto.Unmarshal(msg.Value, protoMsg); err != nil {
+			logger.Warnw(context.Background(), "invalid-message", log.Fields{"error": err})
+			continue
+		}
+		go h.sc.dispatchToConsumers(h.consumerChnls, protoMsg, msg.Topic, msg.Timestamp)
+		session.MarkMessage(msg, "")
 	}
-	logger.Infow(ctx, "group-consumer-stopped", log.Fields{"topic": topic.Name})
-	sc.setUnhealthy(ctx)
+	return nil
 }
 
 func (sc *SaramaClient) startConsumers(ctx context.Context, topic *Topic) error {
@@ -1018,7 +1024,7 @@
 	for _, consumer := range consumerCh.consumers {
 		if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
 			go sc.consumeFromAPartition(ctx, topic, pConsumer, consumerCh)
-		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
+		} else if gConsumer, ok := consumer.(sarama.ConsumerGroup); ok {
 			go sc.consumeGroupMessages(ctx, topic, gConsumer, consumerCh)
 		} else {
 			logger.Errorw(ctx, "invalid-consumer", log.Fields{"topic": topic})
@@ -1070,18 +1076,16 @@
 // setupConsumerChannel creates a consumerChannels object for that topic and add it to the consumerChannels map
 // for that topic.  It also starts the routine that listens for messages on that topic.
 func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan proto.Message, error) {
-	// TODO:  Replace this development partition consumers with a group consumers
-	var pConsumer *scc.Consumer
+	var consumerGroup sarama.ConsumerGroup
 	var err error
-	if pConsumer, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
-		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+	if consumerGroup, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset); err != nil {
+		logger.Errorw(ctx, "creating-group-consumer-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
-	// Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
-	// unbuffered to verify race conditions.
+
 	consumerListeningChannel := make(chan proto.Message)
 	cc := &consumerChannels{
-		consumers: []interface{}{pConsumer},
+		consumers: []interface{}{consumerGroup},
 		channels:  []chan proto.Message{consumerListeningChannel},
 	}
 
@@ -1134,22 +1138,21 @@
 	return channels
 }
 
-func (sc *SaramaClient) addToGroupConsumers(topic string, consumer *scc.Consumer) {
+func (sc *SaramaClient) addToGroupConsumers(topic string, consumerGroup sarama.ConsumerGroup) {
 	sc.lockOfGroupConsumers.Lock()
 	defer sc.lockOfGroupConsumers.Unlock()
 	if _, exist := sc.groupConsumers[topic]; !exist {
-		sc.groupConsumers[topic] = consumer
+		sc.groupConsumers[topic] = consumerGroup
 	}
 }
 
 func (sc *SaramaClient) deleteFromGroupConsumers(ctx context.Context, topic string) error {
 	sc.lockOfGroupConsumers.Lock()
 	defer sc.lockOfGroupConsumers.Unlock()
-	if _, exist := sc.groupConsumers[topic]; exist {
-		consumer := sc.groupConsumers[topic]
+	if consumerGroup, exist := sc.groupConsumers[topic]; exist {
 		delete(sc.groupConsumers, topic)
-		if err := consumer.Close(); err != nil {
-			logger.Errorw(ctx, "failure-closing-consumer", log.Fields{"error": err})
+		if err := consumerGroup.Close(); err != nil {
+			logger.Errorw(ctx, "failure-closing-consumer-group", log.Fields{"error": err})
 			return err
 		}
 	}
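
The sarama_client hunks above replace the archived bsm/sarama-cluster consumer with the consumer-group API that ships in github.com/IBM/sarama: a type implementing Setup/Cleanup/ConsumeClaim is passed to ConsumerGroup.Consume, which is called in a loop because each call ends on rebalance or error. A standalone sketch of that pattern; the broker address, group id, and topic are placeholders, not values from this patch.

package main

import (
	"context"
	"log"

	"github.com/IBM/sarama"
)

type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("topic=%s partition=%d offset=%d", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "") // record progress, as the patched groupConsumerHandler does
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0
	cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
	cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
	cfg.Consumer.Return.Errors = true

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "voltha-example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for {
		// Consume blocks until the session ends (rebalance, error or context
		// cancellation), so it is re-invoked in a loop, mirroring consumeGroupMessages.
		if err := group.Consume(ctx, []string{"voltha.events"}, handler{}); err != nil {
			log.Println("consume error:", err)
		}
		if ctx.Err() != nil {
			return
		}
	}
}
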
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
index af5821a..4e42c26 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
@@ -202,7 +202,7 @@
 	case "FATAL":
 		return FatalLevel, nil
 	}
-	return 0, errors.New("Given LogLevel is invalid : " + l)
+	return 0, errors.New("given LogLevel is invalid : " + l)
 }
 
 func LogLevelToString(l LogLevel) (string, error) {
@@ -218,7 +218,7 @@
 	case FatalLevel:
 		return "FATAL", nil
 	}
-	return "", fmt.Errorf("Given LogLevel is invalid %d", l)
+	return "", fmt.Errorf("given LogLevel is invalid %d", l)
 }
 
 func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
@@ -503,13 +503,9 @@
 		}
 	}
 
-	if strings.HasSuffix(packageName, ".init") {
-		packageName = strings.TrimSuffix(packageName, ".init")
-	}
+	packageName = strings.TrimSuffix(packageName, ".init")
 
-	if strings.HasSuffix(fileName, ".go") {
-		fileName = strings.TrimSuffix(fileName, ".go")
-	}
+	fileName = strings.TrimSuffix(fileName, ".go")
 
 	return packageName, fileName, funcName, foundFrame.Line
 }
@@ -678,7 +674,7 @@
 		return loggers[pkgName], nil
 	}
 
-	return loggers[pkgName], errors.New("Package Not Found")
+	return loggers[pkgName], errors.New("package not found")
 }
 
 // UpdateAllCallerSkipLevel creates new loggers for all registered packages with the updated default caller skip level.
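
The log.go hunk above can drop the HasSuffix guards because strings.TrimSuffix already returns the string unchanged when the suffix is absent. A tiny check of that behaviour:

package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.TrimSuffix("kvstore.go", ".go"))      // "kvstore"
	fmt.Println(strings.TrimSuffix("kvstore", ".go"))         // unchanged: "kvstore"
	fmt.Println(strings.TrimSuffix("pkg/log.init", ".init"))  // "pkg/log"
}
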
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
index e21ce0c..292a1cf 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
@@ -23,13 +23,14 @@
 	"context"
 	"errors"
 	"fmt"
-	"github.com/opentracing/opentracing-go"
-	jtracing "github.com/uber/jaeger-client-go"
-	jcfg "github.com/uber/jaeger-client-go/config"
 	"io"
 	"os"
 	"strings"
 	"sync"
+
+	"github.com/opentracing/opentracing-go"
+	jtracing "github.com/uber/jaeger-client-go"
+	jcfg "github.com/uber/jaeger-client-go/config"
 )
 
 const (
@@ -103,7 +104,7 @@
 func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
 	lfm.componentName = os.Getenv("COMPONENT_NAME")
 	if lfm.componentName == "" {
-		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+		return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
 	}
 
 	lfm.lock.Lock()
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
index 6dfb0c8..4e84126 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
@@ -26,14 +26,15 @@
 
 // GetTrafficShapingInfo returns CIR,PIR and GIR values
 func GetTrafficShapingInfo(ctx context.Context, meterConfig *ofp.OfpMeterConfig) (*tp_pb.TrafficShapingInfo, error) {
-	switch meterBandSize := len(meterConfig.Bands); {
-	case meterBandSize == 1:
+	meterBandSize := len(meterConfig.Bands)
+	switch meterBandSize {
+	case 1:
 		band := meterConfig.Bands[0]
 		if band.BurstSize == 0 { // GIR = PIR, Burst Size = 0, tcont type 1
 			return &tp_pb.TrafficShapingInfo{Pir: band.Rate, Gir: band.Rate}, nil
 		}
 		return &tp_pb.TrafficShapingInfo{Pir: band.Rate, Pbs: band.BurstSize}, nil // PIR, tcont type 4
-	case meterBandSize == 2:
+	case 2:
 		firstBand, secondBand := meterConfig.Bands[0], meterConfig.Bands[1]
 		if firstBand.BurstSize == 0 && secondBand.BurstSize == 0 &&
 			firstBand.Rate == secondBand.Rate { // PIR = GIR, tcont type 1
@@ -45,7 +46,7 @@
 			}
 			return &tp_pb.TrafficShapingInfo{Pir: secondBand.Rate, Pbs: secondBand.BurstSize, Cir: firstBand.Rate, Cbs: firstBand.BurstSize}, nil
 		}
-	case meterBandSize == 3: // PIR,CIR,GIR, tcont type 5
+	case 3: // PIR,CIR,GIR, tcont type 5
 		var count, girIndex int
 		for i, band := range meterConfig.Bands {
 			if band.BurstSize == 0 { // find GIR
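
The switch above keys the traffic-shaping result on the number of meter bands: one band yields PIR (and GIR when its burst size is zero), two bands yield CIR/PIR, three bands add GIR. A hedged worked example for the fully visible single-band case; the voltha-protos v5 import path for ofp is an assumption, and the rate is made up.

package main

import (
	"context"
	"fmt"

	"github.com/opencord/voltha-lib-go/v7/pkg/meters"
	ofp "github.com/opencord/voltha-protos/v5/go/openflow_13"
)

func main() {
	// A single band with zero burst size hits the case-1 branch: GIR == PIR (tcont type 1).
	cfg := &ofp.OfpMeterConfig{
		MeterId: 1,
		Bands:   []*ofp.OfpMeterBandHeader{{Rate: 100000, BurstSize: 0}},
	}
	info, err := meters.GetTrafficShapingInfo(context.Background(), cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("pir=%d gir=%d\n", info.Pir, info.Gir) // expected: pir=100000 gir=100000
}
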
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
index 4b4bb1f..dba7ffa 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
@@ -240,7 +240,7 @@
 	}
 	comma := ""
 	for c, s := range p.status {
-		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
+		if _, err := fmt.Fprintf(w, "%s\"%s\": \"%s\"", comma, c, s.String()); err != nil {
 			logger.Errorw(ctx, "write-response", log.Fields{"error": err})
 			w.WriteHeader(http.StatusInternalServerError)
 			return
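
The probe hunk above swaps w.Write([]byte(fmt.Sprintf(...))) for fmt.Fprintf, which formats straight into the io.Writer without the intermediate string and byte-slice copy. A minimal illustration against an arbitrary writer; the field values are placeholders.

package main

import (
	"fmt"
	"os"
)

func main() {
	comma := ""
	// Equivalent to w.Write([]byte(fmt.Sprintf(...))) but without the extra allocation.
	if _, err := fmt.Fprintf(os.Stdout, "%s\"%s\": \"%s\"\n", comma, "kv-store", "Running"); err != nil {
		fmt.Fprintln(os.Stderr, "write-response error:", err)
	}
}
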
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
deleted file mode 100644
index 5e98735..0000000
--- a/vendor/github.com/pierrec/lz4/.gitignore
+++ /dev/null
@@ -1,34 +0,0 @@
-# Created by https://www.gitignore.io/api/macos
-
-### macOS ###
-*.DS_Store
-.AppleDouble
-.LSOverride
-
-# Icon must end with two \r
-Icon
-
-
-# Thumbnails
-._*
-
-# Files that might appear in the root of a volume
-.DocumentRevisions-V100
-.fseventsd
-.Spotlight-V100
-.TemporaryItems
-.Trashes
-.VolumeIcon.icns
-.com.apple.timemachine.donotpresent
-
-# Directories potentially created on remote AFP share
-.AppleDB
-.AppleDesktop
-Network Trash Folder
-Temporary Items
-.apdisk
-
-# End of https://www.gitignore.io/api/macos
-
-cmd/*/*exe
-.idea
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
deleted file mode 100644
index fd6c6db..0000000
--- a/vendor/github.com/pierrec/lz4/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-
-env:
-  - GO111MODULE=off
-
-go:
-  - 1.9.x
-  - 1.10.x
-  - 1.11.x
-  - 1.12.x
-  - master
-
-matrix:
- fast_finish: true
- allow_failures:
-   - go: master
-
-sudo: false
-
-script: 
- - go test -v -cpu=2
- - go test -v -cpu=2 -race
- - go test -v -cpu=2 -tags noasm
- - go test -v -cpu=2 -race -tags noasm
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
deleted file mode 100644
index 4ee388e..0000000
--- a/vendor/github.com/pierrec/lz4/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# lz4 : LZ4 compression in pure Go
-
-[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
-[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
-[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
-[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
-
-## Overview
-
-This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
-The implementation is based on the reference C [one](https://github.com/lz4/lz4).
-
-## Install
-
-Assuming you have the go toolchain installed:
-
-```
-go get github.com/pierrec/lz4
-```
-
-There is a command line interface tool to compress and decompress LZ4 files.
-
-```
-go install github.com/pierrec/lz4/cmd/lz4c
-```
-
-Usage
-
-```
-Usage of lz4c:
-  -version
-        print the program version
-
-Subcommands:
-Compress the given files or from stdin to stdout.
-compress [arguments] [<file name> ...]
-  -bc
-        enable block checksum
-  -l int
-        compression level (0=fastest)
-  -sc
-        disable stream checksum
-  -size string
-        block max size [64K,256K,1M,4M] (default "4M")
-
-Uncompress the given files or from stdin to stdout.
-uncompress [arguments] [<file name> ...]
-
-```
-
-
-## Example
-
-```
-// Compress and uncompress an input string.
-s := "hello world"
-r := strings.NewReader(s)
-
-// The pipe will uncompress the data from the writer.
-pr, pw := io.Pipe()
-zw := lz4.NewWriter(pw)
-zr := lz4.NewReader(pr)
-
-go func() {
-	// Compress the input string.
-	_, _ = io.Copy(zw, r)
-	_ = zw.Close() // Make sure the writer is closed
-	_ = pw.Close() // Terminate the pipe
-}()
-
-_, _ = io.Copy(os.Stdout, zr)
-
-// Output:
-// hello world
-```
-
-## Contributing
-
-Contributions are very welcome for bug fixing, performance improvements...!
-
-- Open an issue with a proper description
-- Send a pull request with appropriate test case(s)
-
-## Contributors
-
-Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors)  so far!
-
-Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
-
-Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
deleted file mode 100644
index 664d9be..0000000
--- a/vendor/github.com/pierrec/lz4/block.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package lz4
-
-import (
-	"encoding/binary"
-	"math/bits"
-	"sync"
-)
-
-// blockHash hashes the lower 6 bytes into a value < htSize.
-func blockHash(x uint64) uint32 {
-	const prime6bytes = 227718039650203
-	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
-}
-
-// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
-func CompressBlockBound(n int) int {
-	return n + n/255 + 16
-}
-
-// UncompressBlock uncompresses the source buffer into the destination one,
-// and returns the uncompressed size.
-//
-// The destination buffer must be sized appropriately.
-//
-// An error is returned if the source data is invalid or the destination buffer is too small.
-func UncompressBlock(src, dst []byte) (int, error) {
-	if len(src) == 0 {
-		return 0, nil
-	}
-	if di := decodeBlock(dst, src); di >= 0 {
-		return di, nil
-	}
-	return 0, ErrInvalidSourceShortBuffer
-}
-
-// CompressBlock compresses the source buffer into the destination one.
-// This is the fast version of LZ4 compression and also the default one.
-//
-// The argument hashTable is scratch space for a hash table used by the
-// compressor. If provided, it should have length at least 1<<16. If it is
-// shorter (or nil), CompressBlock allocates its own hash table.
-//
-// The size of the compressed data is returned.
-//
-// If the destination buffer size is lower than CompressBlockBound and
-// the compressed size is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
-	defer recoverBlock(&err)
-
-	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
-	isNotCompressible := len(dst) < CompressBlockBound(len(src))
-
-	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
-	// This significantly speeds up incompressible data and usually has very small impact on compression.
-	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
-	const adaptSkipLog = 7
-	if len(hashTable) < htSize {
-		htIface := htPool.Get()
-		defer htPool.Put(htIface)
-		hashTable = (*(htIface).(*[htSize]int))[:]
-	}
-	// Prove to the compiler the table has at least htSize elements.
-	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
-	hashTable = hashTable[:htSize]
-
-	// si: Current position of the search.
-	// anchor: Position of the current literals.
-	var si, di, anchor int
-	sn := len(src) - mfLimit
-	if sn <= 0 {
-		goto lastLiterals
-	}
-
-	// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
-	for si < sn {
-		// Hash the next 6 bytes (sequence)...
-		match := binary.LittleEndian.Uint64(src[si:])
-		h := blockHash(match)
-		h2 := blockHash(match >> 8)
-
-		// We check a match at s, s+1 and s+2 and pick the first one we get.
-		// Checking 3 only requires us to load the source one.
-		ref := hashTable[h]
-		ref2 := hashTable[h2]
-		hashTable[h] = si
-		hashTable[h2] = si + 1
-		offset := si - ref
-
-		// If offset <= 0 we got an old entry in the hash table.
-		if offset <= 0 || offset >= winSize || // Out of window.
-			uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
-			// No match. Start calculating another hash.
-			// The processor can usually do this out-of-order.
-			h = blockHash(match >> 16)
-			ref = hashTable[h]
-
-			// Check the second match at si+1
-			si += 1
-			offset = si - ref2
-
-			if offset <= 0 || offset >= winSize ||
-				uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
-				// No match. Check the third match at si+2
-				si += 1
-				offset = si - ref
-				hashTable[h] = si
-
-				if offset <= 0 || offset >= winSize ||
-					uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
-					// Skip one extra byte (at si+3) before we check 3 matches again.
-					si += 2 + (si-anchor)>>adaptSkipLog
-					continue
-				}
-			}
-		}
-
-		// Match found.
-		lLen := si - anchor // Literal length.
-		// We already matched 4 bytes.
-		mLen := 4
-
-		// Extend backwards if we can, reducing literals.
-		tOff := si - offset - 1
-		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
-			si--
-			tOff--
-			lLen--
-			mLen++
-		}
-
-		// Add the match length, so we continue search at the end.
-		// Use mLen to store the offset base.
-		si, mLen = si+mLen, si+minMatch
-
-		// Find the longest match by looking by batches of 8 bytes.
-		for si+8 < sn {
-			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
-			if x == 0 {
-				si += 8
-			} else {
-				// Stop is first non-zero byte.
-				si += bits.TrailingZeros64(x) >> 3
-				break
-			}
-		}
-
-		mLen = si - mLen
-		if mLen < 0xF {
-			dst[di] = byte(mLen)
-		} else {
-			dst[di] = 0xF
-		}
-
-		// Encode literals length.
-		if lLen < 0xF {
-			dst[di] |= byte(lLen << 4)
-		} else {
-			dst[di] |= 0xF0
-			di++
-			l := lLen - 0xF
-			for ; l >= 0xFF; l -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(l)
-		}
-		di++
-
-		// Literals.
-		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
-		di += lLen + 2
-		anchor = si
-
-		// Encode offset.
-		_ = dst[di] // Bound check elimination.
-		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
-		// Encode match length part 2.
-		if mLen >= 0xF {
-			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(mLen)
-			di++
-		}
-		// Check if we can load next values.
-		if si >= sn {
-			break
-		}
-		// Hash match end-2
-		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
-		hashTable[h] = si - 2
-	}
-
-lastLiterals:
-	if isNotCompressible && anchor == 0 {
-		// Incompressible.
-		return 0, nil
-	}
-
-	// Last literals.
-	lLen := len(src) - anchor
-	if lLen < 0xF {
-		dst[di] = byte(lLen << 4)
-	} else {
-		dst[di] = 0xF0
-		di++
-		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
-			dst[di] = 0xFF
-			di++
-		}
-		dst[di] = byte(lLen)
-	}
-	di++
-
-	// Write the last literals.
-	if isNotCompressible && di >= anchor {
-		// Incompressible.
-		return 0, nil
-	}
-	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
-	return di, nil
-}
-
-// Pool of hash tables for CompressBlock.
-var htPool = sync.Pool{
-	New: func() interface{} {
-		return new([htSize]int)
-	},
-}
-
-// blockHash hashes 4 bytes into a value < winSize.
-func blockHashHC(x uint32) uint32 {
-	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
-	return x * hasher >> (32 - winSizeLog)
-}
-
-// CompressBlockHC compresses the source buffer src into the destination dst
-// with max search depth (use 0 or negative value for no max).
-//
-// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
-//
-// The size of the compressed data is returned.
-//
-// If the destination buffer size is lower than CompressBlockBound and
-// the compressed size is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
-	defer recoverBlock(&err)
-
-	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
-	isNotCompressible := len(dst) < CompressBlockBound(len(src))
-
-	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
-	// This significantly speeds up incompressible data and usually has very small impact on compression.
-	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
-	const adaptSkipLog = 7
-
-	var si, di, anchor int
-
-	// hashTable: stores the last position found for a given hash
-	// chainTable: stores previous positions for a given hash
-	var hashTable, chainTable [winSize]int
-
-	if depth <= 0 {
-		depth = winSize
-	}
-
-	sn := len(src) - mfLimit
-	if sn <= 0 {
-		goto lastLiterals
-	}
-
-	for si < sn {
-		// Hash the next 4 bytes (sequence).
-		match := binary.LittleEndian.Uint32(src[si:])
-		h := blockHashHC(match)
-
-		// Follow the chain until out of window and give the longest match.
-		mLen := 0
-		offset := 0
-		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
-			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
-			// must match to improve on the match length.
-			if src[next+mLen] != src[si+mLen] {
-				continue
-			}
-			ml := 0
-			// Compare the current position with a previous with the same hash.
-			for ml < sn-si {
-				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
-				if x == 0 {
-					ml += 8
-				} else {
-					// Stop is first non-zero byte.
-					ml += bits.TrailingZeros64(x) >> 3
-					break
-				}
-			}
-			if ml < minMatch || ml <= mLen {
-				// Match too small (<minMath) or smaller than the current match.
-				continue
-			}
-			// Found a longer match, keep its position and length.
-			mLen = ml
-			offset = si - next
-			// Try another previous position with the same hash.
-			try--
-		}
-		chainTable[si&winMask] = hashTable[h]
-		hashTable[h] = si
-
-		// No match found.
-		if mLen == 0 {
-			si += 1 + (si-anchor)>>adaptSkipLog
-			continue
-		}
-
-		// Match found.
-		// Update hash/chain tables with overlapping bytes:
-		// si already hashed, add everything from si+1 up to the match length.
-		winStart := si + 1
-		if ws := si + mLen - winSize; ws > winStart {
-			winStart = ws
-		}
-		for si, ml := winStart, si+mLen; si < ml; {
-			match >>= 8
-			match |= uint32(src[si+3]) << 24
-			h := blockHashHC(match)
-			chainTable[si&winMask] = hashTable[h]
-			hashTable[h] = si
-			si++
-		}
-
-		lLen := si - anchor
-		si += mLen
-		mLen -= minMatch // Match length does not include minMatch.
-
-		if mLen < 0xF {
-			dst[di] = byte(mLen)
-		} else {
-			dst[di] = 0xF
-		}
-
-		// Encode literals length.
-		if lLen < 0xF {
-			dst[di] |= byte(lLen << 4)
-		} else {
-			dst[di] |= 0xF0
-			di++
-			l := lLen - 0xF
-			for ; l >= 0xFF; l -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(l)
-		}
-		di++
-
-		// Literals.
-		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
-		di += lLen
-		anchor = si
-
-		// Encode offset.
-		di += 2
-		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
-		// Encode match length part 2.
-		if mLen >= 0xF {
-			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(mLen)
-			di++
-		}
-	}
-
-	if isNotCompressible && anchor == 0 {
-		// Incompressible.
-		return 0, nil
-	}
-
-	// Last literals.
-lastLiterals:
-	lLen := len(src) - anchor
-	if lLen < 0xF {
-		dst[di] = byte(lLen << 4)
-	} else {
-		dst[di] = 0xF0
-		di++
-		lLen -= 0xF
-		for ; lLen >= 0xFF; lLen -= 0xFF {
-			dst[di] = 0xFF
-			di++
-		}
-		dst[di] = byte(lLen)
-	}
-	di++
-
-	// Write the last literals.
-	if isNotCompressible && di >= anchor {
-		// Incompressible.
-		return 0, nil
-	}
-	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
-	return di, nil
-}
diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go
deleted file mode 100644
index bc5e78d..0000000
--- a/vendor/github.com/pierrec/lz4/debug.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// +build lz4debug
-
-package lz4
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-)
-
-const debugFlag = true
-
-func debug(args ...interface{}) {
-	_, file, line, _ := runtime.Caller(1)
-	file = filepath.Base(file)
-
-	f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
-	if f[len(f)-1] != '\n' {
-		f += "\n"
-	}
-	fmt.Fprintf(os.Stderr, f, args[1:]...)
-}
diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go
deleted file mode 100644
index 44211ad..0000000
--- a/vendor/github.com/pierrec/lz4/debug_stub.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !lz4debug
-
-package lz4
-
-const debugFlag = false
-
-func debug(args ...interface{}) {}
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go
deleted file mode 100644
index 43cc14f..0000000
--- a/vendor/github.com/pierrec/lz4/decode_amd64.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-package lz4
-
-//go:noescape
-func decodeBlock(dst, src []byte) int
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s
deleted file mode 100644
index 20fef39..0000000
--- a/vendor/github.com/pierrec/lz4/decode_amd64.s
+++ /dev/null
@@ -1,375 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-
-// AX scratch
-// BX scratch
-// CX scratch
-// DX token
-//
-// DI &dst
-// SI &src
-// R8 &dst + len(dst)
-// R9 &src + len(src)
-// R11 &dst
-// R12 short output end
-// R13 short input end
-// func decodeBlock(dst, src []byte) int
-// using 50 bytes of stack currently
-TEXT ·decodeBlock(SB), NOSPLIT, $64-56
-	MOVQ dst_base+0(FP), DI
-	MOVQ DI, R11
-	MOVQ dst_len+8(FP), R8
-	ADDQ DI, R8
-
-	MOVQ src_base+24(FP), SI
-	MOVQ src_len+32(FP), R9
-	ADDQ SI, R9
-
-	// shortcut ends
-	// short output end
-	MOVQ R8, R12
-	SUBQ $32, R12
-	// short input end
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-loop:
-	// for si < len(src)
-	CMPQ SI, R9
-	JGE end
-
-	// token := uint32(src[si])
-	MOVBQZX (SI), DX
-	INCQ SI
-
-	// lit_len = token >> 4
-	// if lit_len > 0
-	// CX = lit_len
-	MOVQ DX, CX
-	SHRQ $4, CX
-
-	// if lit_len != 0xF
-	CMPQ CX, $0xF
-	JEQ lit_len_loop_pre
-	CMPQ DI, R12
-	JGE lit_len_loop_pre
-	CMPQ SI, R13
-	JGE lit_len_loop_pre
-
-	// copy shortcut
-
-	// A two-stage shortcut for the most common case:
-	// 1) If the literal length is 0..14, and there is enough space,
-	// enter the shortcut and copy 16 bytes on behalf of the literals
-	// (in the fast mode, only 8 bytes can be safely copied this way).
-	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
-	// manner; but we ensure that there's enough space in the output for
-	// those 18 bytes earlier, upon entering the shortcut (in other words,
-	// there is a combined check for both stages).
-
-	// copy literal
-	MOVOU (SI), X0
-	MOVOU X0, (DI)
-	ADDQ CX, DI
-	ADDQ CX, SI
-
-	MOVQ DX, CX
-	ANDQ $0xF, CX
-
-	// The second stage: prepare for match copying, decode full info.
-	// If it doesn't work out, the info won't be wasted.
-	// offset := uint16(data[:2])
-	MOVWQZX (SI), DX
-	ADDQ $2, SI
-
-	MOVQ DI, AX
-	SUBQ DX, AX
-	CMPQ AX, DI
-	JGT err_short_buf
-
-	// if we can't do the second stage then jump straight to read the
-	// match length, we already have the offset.
-	CMPQ CX, $0xF
-	JEQ match_len_loop_pre
-	CMPQ DX, $8
-	JLT match_len_loop_pre
-	CMPQ AX, R11
-	JLT err_short_buf
-
-	// memcpy(op + 0, match + 0, 8);
-	MOVQ (AX), BX
-	MOVQ BX, (DI)
-	// memcpy(op + 8, match + 8, 8);
-	MOVQ 8(AX), BX
-	MOVQ BX, 8(DI)
-	// memcpy(op +16, match +16, 2);
-	MOVW 16(AX), BX
-	MOVW BX, 16(DI)
-
-	ADDQ $4, DI // minmatch
-	ADDQ CX, DI
-
-	// shortcut complete, load next token
-	JMP loop
-
-lit_len_loop_pre:
-	// if lit_len > 0
-	CMPQ CX, $0
-	JEQ offset
-	CMPQ CX, $0xF
-	JNE copy_literal
-
-lit_len_loop:
-	// for src[si] == 0xFF
-	CMPB (SI), $0xFF
-	JNE lit_len_finalise
-
-	// bounds check src[si+1]
-	MOVQ SI, AX
-	ADDQ $1, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	// lit_len += 0xFF
-	ADDQ $0xFF, CX
-	INCQ SI
-	JMP lit_len_loop
-
-lit_len_finalise:
-	// lit_len += int(src[si])
-	// si++
-	MOVBQZX (SI), AX
-	ADDQ AX, CX
-	INCQ SI
-
-copy_literal:
-	// bounds check src and dst
-	MOVQ SI, AX
-	ADDQ CX, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	MOVQ DI, AX
-	ADDQ CX, AX
-	CMPQ AX, R8
-	JGT err_short_buf
-
-	// whats a good cut off to call memmove?
-	CMPQ CX, $16
-	JGT memmove_lit
-
-	// if len(dst[di:]) < 16
-	MOVQ R8, AX
-	SUBQ DI, AX
-	CMPQ AX, $16
-	JLT memmove_lit
-
-	// if len(src[si:]) < 16
-	MOVQ R9, AX
-	SUBQ SI, AX
-	CMPQ AX, $16
-	JLT memmove_lit
-
-	MOVOU (SI), X0
-	MOVOU X0, (DI)
-
-	JMP finish_lit_copy
-
-memmove_lit:
-	// memmove(to, from, len)
-	MOVQ DI, 0(SP)
-	MOVQ SI, 8(SP)
-	MOVQ CX, 16(SP)
-	// spill
-	MOVQ DI, 24(SP)
-	MOVQ SI, 32(SP)
-	MOVQ CX, 40(SP) // need len to inc SI, DI after
-	MOVB DX, 48(SP)
-	CALL runtime·memmove(SB)
-
-	// restore registers
-	MOVQ 24(SP), DI
-	MOVQ 32(SP), SI
-	MOVQ 40(SP), CX
-	MOVB 48(SP), DX
-
-	// recalc initial values
-	MOVQ dst_base+0(FP), R8
-	MOVQ R8, R11
-	ADDQ dst_len+8(FP), R8
-	MOVQ src_base+24(FP), R9
-	ADDQ src_len+32(FP), R9
-	MOVQ R8, R12
-	SUBQ $32, R12
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-finish_lit_copy:
-	ADDQ CX, SI
-	ADDQ CX, DI
-
-	CMPQ SI, R9
-	JGE end
-
-offset:
-	// CX := mLen
-	// free up DX to use for offset
-	MOVQ DX, CX
-
-	MOVQ SI, AX
-	ADDQ $2, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	// offset
-	// DX := int(src[si]) | int(src[si+1])<<8
-	MOVWQZX (SI), DX
-	ADDQ $2, SI
-
-	// 0 offset is invalid
-	CMPQ DX, $0
-	JEQ err_corrupt
-
-	ANDB $0xF, CX
-
-match_len_loop_pre:
-	// if mlen != 0xF
-	CMPB CX, $0xF
-	JNE copy_match
-
-match_len_loop:
-	// for src[si] == 0xFF
-	// lit_len += 0xFF
-	CMPB (SI), $0xFF
-	JNE match_len_finalise
-
-	// bounds check src[si+1]
-	MOVQ SI, AX
-	ADDQ $1, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	ADDQ $0xFF, CX
-	INCQ SI
-	JMP match_len_loop
-
-match_len_finalise:
-	// lit_len += int(src[si])
-	// si++
-	MOVBQZX (SI), AX
-	ADDQ AX, CX
-	INCQ SI
-
-copy_match:
-	// mLen += minMatch
-	ADDQ $4, CX
-
-	// check we have match_len bytes left in dst
-	// di+match_len < len(dst)
-	MOVQ DI, AX
-	ADDQ CX, AX
-	CMPQ AX, R8
-	JGT err_short_buf
-
-	// DX = offset
-	// CX = match_len
-	// BX = &dst + (di - offset)
-	MOVQ DI, BX
-	SUBQ DX, BX
-
-	// check BX is within dst
-	// if BX < &dst
-	CMPQ BX, R11
-	JLT err_short_buf
-
-	// if offset + match_len < di
-	MOVQ BX, AX
-	ADDQ CX, AX
-	CMPQ DI, AX
-	JGT copy_interior_match
-
-	// AX := len(dst[:di])
-	// MOVQ DI, AX
-	// SUBQ R11, AX
-
-	// copy 16 bytes at a time
-	// if di-offset < 16 copy 16-(di-offset) bytes to di
-	// then do the remaining
-
-copy_match_loop:
-	// for match_len >= 0
-	// dst[di] = dst[i]
-	// di++
-	// i++
-	MOVB (BX), AX
-	MOVB AX, (DI)
-	INCQ DI
-	INCQ BX
-	DECQ CX
-
-	CMPQ CX, $0
-	JGT copy_match_loop
-
-	JMP loop
-
-copy_interior_match:
-	CMPQ CX, $16
-	JGT memmove_match
-
-	// if len(dst[di:]) < 16
-	MOVQ R8, AX
-	SUBQ DI, AX
-	CMPQ AX, $16
-	JLT memmove_match
-
-	MOVOU (BX), X0
-	MOVOU X0, (DI)
-
-	ADDQ CX, DI
-	JMP loop
-
-memmove_match:
-	// memmove(to, from, len)
-	MOVQ DI, 0(SP)
-	MOVQ BX, 8(SP)
-	MOVQ CX, 16(SP)
-	// spill
-	MOVQ DI, 24(SP)
-	MOVQ SI, 32(SP)
-	MOVQ CX, 40(SP) // need len to inc SI, DI after
-	CALL runtime·memmove(SB)
-
-	// restore registers
-	MOVQ 24(SP), DI
-	MOVQ 32(SP), SI
-	MOVQ 40(SP), CX
-
-	// recalc initial values
-	MOVQ dst_base+0(FP), R8
-	MOVQ R8, R11 // TODO: make these sensible numbers
-	ADDQ dst_len+8(FP), R8
-	MOVQ src_base+24(FP), R9
-	ADDQ src_len+32(FP), R9
-	MOVQ R8, R12
-	SUBQ $32, R12
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-	ADDQ CX, DI
-	JMP loop
-
-err_corrupt:
-	MOVQ $-1, ret+48(FP)
-	RET
-
-err_short_buf:
-	MOVQ $-2, ret+48(FP)
-	RET
-
-end:
-	SUBQ R11, DI
-	MOVQ DI, ret+48(FP)
-	RET
diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go
deleted file mode 100644
index 919888e..0000000
--- a/vendor/github.com/pierrec/lz4/decode_other.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// +build !amd64 appengine !gc noasm
-
-package lz4
-
-func decodeBlock(dst, src []byte) (ret int) {
-	const hasError = -2
-	defer func() {
-		if recover() != nil {
-			ret = hasError
-		}
-	}()
-
-	var si, di int
-	for {
-		// Literals and match lengths (token).
-		b := int(src[si])
-		si++
-
-		// Literals.
-		if lLen := b >> 4; lLen > 0 {
-			switch {
-			case lLen < 0xF && si+16 < len(src):
-				// Shortcut 1
-				// if we have enough room in src and dst, and the literals length
-				// is small enough (0..14) then copy all 16 bytes, even if not all
-				// are part of the literals.
-				copy(dst[di:], src[si:si+16])
-				si += lLen
-				di += lLen
-				if mLen := b & 0xF; mLen < 0xF {
-					// Shortcut 2
-					// if the match length (4..18) fits within the literals, then copy
-					// all 18 bytes, even if not all are part of the literals.
-					mLen += 4
-					if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
-						i := di - offset
-						end := i + 18
-						if end > len(dst) {
-							// The remaining buffer may not hold 18 bytes.
-							// See https://github.com/pierrec/lz4/issues/51.
-							end = len(dst)
-						}
-						copy(dst[di:], dst[i:end])
-						si += 2
-						di += mLen
-						continue
-					}
-				}
-			case lLen == 0xF:
-				for src[si] == 0xFF {
-					lLen += 0xFF
-					si++
-				}
-				lLen += int(src[si])
-				si++
-				fallthrough
-			default:
-				copy(dst[di:di+lLen], src[si:si+lLen])
-				si += lLen
-				di += lLen
-			}
-		}
-		if si >= len(src) {
-			return di
-		}
-
-		offset := int(src[si]) | int(src[si+1])<<8
-		if offset == 0 {
-			return hasError
-		}
-		si += 2
-
-		// Match.
-		mLen := b & 0xF
-		if mLen == 0xF {
-			for src[si] == 0xFF {
-				mLen += 0xFF
-				si++
-			}
-			mLen += int(src[si])
-			si++
-		}
-		mLen += minMatch
-
-		// Copy the match.
-		expanded := dst[di-offset:]
-		if mLen > offset {
-			// Efficiently copy the match dst[di-offset:di] into the dst slice.
-			bytesToCopy := offset * (mLen / offset)
-			for n := offset; n <= bytesToCopy+offset; n *= 2 {
-				copy(expanded[n:], expanded[:n])
-			}
-			di += bytesToCopy
-			mLen -= bytesToCopy
-		}
-		di += copy(dst[di:di+mLen], expanded[:mLen])
-	}
-}
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
deleted file mode 100644
index 1c45d18..0000000
--- a/vendor/github.com/pierrec/lz4/errors.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package lz4
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	rdebug "runtime/debug"
-)
-
-var (
-	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed
-	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
-	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
-	// ErrInvalid is returned when reading an invalid LZ4 archive.
-	ErrInvalid = errors.New("lz4: bad magic number")
-	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
-	ErrBlockDependency = errors.New("lz4: block dependency not supported")
-	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
-	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
-)
-
-func recoverBlock(e *error) {
-	if r := recover(); r != nil && *e == nil {
-		if debugFlag {
-			fmt.Fprintln(os.Stderr, r)
-			rdebug.PrintStack()
-		}
-		*e = ErrInvalidSourceShortBuffer
-	}
-}
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
deleted file mode 100644
index 7a76a6b..0000000
--- a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
-// (https://github.com/Cyan4973/XXH/)
-package xxh32
-
-import (
-	"encoding/binary"
-)
-
-const (
-	prime1 uint32 = 2654435761
-	prime2 uint32 = 2246822519
-	prime3 uint32 = 3266489917
-	prime4 uint32 = 668265263
-	prime5 uint32 = 374761393
-
-	primeMask   = 0xFFFFFFFF
-	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
-	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
-)
-
-// XXHZero represents an xxhash32 object with seed 0.
-type XXHZero struct {
-	v1       uint32
-	v2       uint32
-	v3       uint32
-	v4       uint32
-	totalLen uint64
-	buf      [16]byte
-	bufused  int
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (xxh XXHZero) Sum(b []byte) []byte {
-	h32 := xxh.Sum32()
-	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
-}
-
-// Reset resets the Hash to its initial state.
-func (xxh *XXHZero) Reset() {
-	xxh.v1 = prime1plus2
-	xxh.v2 = prime2
-	xxh.v3 = 0
-	xxh.v4 = prime1minus
-	xxh.totalLen = 0
-	xxh.bufused = 0
-}
-
-// Size returns the number of bytes returned by Sum().
-func (xxh *XXHZero) Size() int {
-	return 4
-}
-
-// BlockSize gives the minimum number of bytes accepted by Write().
-func (xxh *XXHZero) BlockSize() int {
-	return 1
-}
-
-// Write adds input bytes to the Hash.
-// It never returns an error.
-func (xxh *XXHZero) Write(input []byte) (int, error) {
-	if xxh.totalLen == 0 {
-		xxh.Reset()
-	}
-	n := len(input)
-	m := xxh.bufused
-
-	xxh.totalLen += uint64(n)
-
-	r := len(xxh.buf) - m
-	if n < r {
-		copy(xxh.buf[m:], input)
-		xxh.bufused += len(input)
-		return n, nil
-	}
-
-	p := 0
-	// Causes compiler to work directly from registers instead of stack:
-	v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
-	if m > 0 {
-		// some data left from previous update
-		copy(xxh.buf[xxh.bufused:], input[:r])
-		xxh.bufused += len(input) - r
-
-		// fast rotl(13)
-		buf := xxh.buf[:16] // BCE hint.
-		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
-		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
-		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
-		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
-		p = r
-		xxh.bufused = 0
-	}
-
-	for n := n - 16; p <= n; p += 16 {
-		sub := input[p:][:16] //BCE hint for compiler
-		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
-		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
-		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
-		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
-	}
-	xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
-
-	copy(xxh.buf[xxh.bufused:], input[p:])
-	xxh.bufused += len(input) - p
-
-	return n, nil
-}
-
-// Sum32 returns the 32 bits Hash value.
-func (xxh *XXHZero) Sum32() uint32 {
-	h32 := uint32(xxh.totalLen)
-	if h32 >= 16 {
-		h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
-	} else {
-		h32 += prime5
-	}
-
-	p := 0
-	n := xxh.bufused
-	buf := xxh.buf
-	for n := n - 4; p <= n; p += 4 {
-		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
-		h32 = rol17(h32) * prime4
-	}
-	for ; p < n; p++ {
-		h32 += uint32(buf[p]) * prime5
-		h32 = rol11(h32) * prime1
-	}
-
-	h32 ^= h32 >> 15
-	h32 *= prime2
-	h32 ^= h32 >> 13
-	h32 *= prime3
-	h32 ^= h32 >> 16
-
-	return h32
-}
-
-// ChecksumZero returns the 32bits Hash value.
-func ChecksumZero(input []byte) uint32 {
-	n := len(input)
-	h32 := uint32(n)
-
-	if n < 16 {
-		h32 += prime5
-	} else {
-		v1 := prime1plus2
-		v2 := prime2
-		v3 := uint32(0)
-		v4 := prime1minus
-		p := 0
-		for n := n - 16; p <= n; p += 16 {
-			sub := input[p:][:16] //BCE hint for compiler
-			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
-			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
-			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
-			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
-		}
-		input = input[p:]
-		n -= p
-		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
-	}
-
-	p := 0
-	for n := n - 4; p <= n; p += 4 {
-		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
-		h32 = rol17(h32) * prime4
-	}
-	for p < n {
-		h32 += uint32(input[p]) * prime5
-		h32 = rol11(h32) * prime1
-		p++
-	}
-
-	h32 ^= h32 >> 15
-	h32 *= prime2
-	h32 ^= h32 >> 13
-	h32 *= prime3
-	h32 ^= h32 >> 16
-
-	return h32
-}
-
-// Uint32Zero hashes x with seed 0.
-func Uint32Zero(x uint32) uint32 {
-	h := prime5 + 4 + x*prime3
-	h = rol17(h) * prime4
-	h ^= h >> 15
-	h *= prime2
-	h ^= h >> 13
-	h *= prime3
-	h ^= h >> 16
-	return h
-}
-
-func rol1(u uint32) uint32 {
-	return u<<1 | u>>31
-}
-
-func rol7(u uint32) uint32 {
-	return u<<7 | u>>25
-}
-
-func rol11(u uint32) uint32 {
-	return u<<11 | u>>21
-}
-
-func rol12(u uint32) uint32 {
-	return u<<12 | u>>20
-}
-
-func rol13(u uint32) uint32 {
-	return u<<13 | u>>19
-}
-
-func rol17(u uint32) uint32 {
-	return u<<17 | u>>15
-}
-
-func rol18(u uint32) uint32 {
-	return u<<18 | u>>14
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
deleted file mode 100644
index a3284bd..0000000
--- a/vendor/github.com/pierrec/lz4/lz4.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Package lz4 implements reading and writing lz4 compressed data (a frame),
-// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
-//
-// Although the block level compression and decompression functions are exposed and are fully compatible
-// with the lz4 block format definition, they are low level and should not be used directly.
-// For a complete description of an lz4 compressed block, see:
-// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
-//
-// See https://github.com/Cyan4973/lz4 for the reference C implementation.
-//
-package lz4
-
-import (
-	"math/bits"
-	"sync"
-)
-
-const (
-	// Extension is the LZ4 frame file name extension
-	Extension = ".lz4"
-	// Version is the LZ4 frame format version
-	Version = 1
-
-	frameMagic       uint32 = 0x184D2204
-	frameSkipMagic   uint32 = 0x184D2A50
-	frameMagicLegacy uint32 = 0x184C2102
-
-	// The following constants are used to setup the compression algorithm.
-	minMatch            = 4  // the minimum size of the match sequence size (4 bytes)
-	winSizeLog          = 16 // LZ4 64Kb window size limit
-	winSize             = 1 << winSizeLog
-	winMask             = winSize - 1 // 64Kb window of previous data for dependent blocks
-	compressedBlockFlag = 1 << 31
-	compressedBlockMask = compressedBlockFlag - 1
-
-	// hashLog determines the size of the hash table used to quickly find a previous match position.
-	// Its value influences the compression speed and memory usage, the lower the faster,
-	// but at the expense of the compression ratio.
-	// 16 seems to be the best compromise for fast compression.
-	hashLog = 16
-	htSize  = 1 << hashLog
-
-	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
-)
-
-// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
-const (
-	blockSize64K = 1 << (16 + 2*iota)
-	blockSize256K
-	blockSize1M
-	blockSize4M
-)
-
-var (
-	// Keep a pool of buffers for each valid block sizes.
-	bsMapValue = [...]*sync.Pool{
-		newBufferPool(2 * blockSize64K),
-		newBufferPool(2 * blockSize256K),
-		newBufferPool(2 * blockSize1M),
-		newBufferPool(2 * blockSize4M),
-	}
-)
-
-// newBufferPool returns a pool for buffers of the given size.
-func newBufferPool(size int) *sync.Pool {
-	return &sync.Pool{
-		New: func() interface{} {
-			return make([]byte, size)
-		},
-	}
-}
-
-// getBuffer returns a buffer to its pool.
-func getBuffer(size int) []byte {
-	idx := blockSizeValueToIndex(size) - 4
-	return bsMapValue[idx].Get().([]byte)
-}
-
-// putBuffer returns a buffer to its pool.
-func putBuffer(size int, buf []byte) {
-	if cap(buf) > 0 {
-		idx := blockSizeValueToIndex(size) - 4
-		bsMapValue[idx].Put(buf[:cap(buf)])
-	}
-}
-func blockSizeIndexToValue(i byte) int {
-	return 1 << (16 + 2*uint(i))
-}
-func isValidBlockSize(size int) bool {
-	const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
-
-	return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
-}
-func blockSizeValueToIndex(size int) byte {
-	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
-}
-
-// Header describes the various flags that can be set on a Writer or obtained from a Reader.
-// The default values match those of the LZ4 frame format definition
-// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
-//
-// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
-// It is the caller's responsibility to check them if necessary.
-type Header struct {
-	BlockChecksum    bool   // Compressed blocks checksum flag.
-	NoChecksum       bool   // Frame checksum flag.
-	BlockMaxSize     int    // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
-	Size             uint64 // Frame total size. It is _not_ computed by the Writer.
-	CompressionLevel int    // Compression level (higher is better, use 0 for fastest compression).
-	done             bool   // Header processed flag (Read or Write and checked).
-}
-
-// Reset resets the internal status.
-func (h *Header) Reset() {
-	h.done = false
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
deleted file mode 100644
index 9a0fb00..0000000
--- a/vendor/github.com/pierrec/lz4/lz4_go1.10.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//+build go1.10
-
-package lz4
-
-import (
-	"fmt"
-	"strings"
-)
-
-func (h Header) String() string {
-	var s strings.Builder
-
-	s.WriteString(fmt.Sprintf("%T{", h))
-	if h.BlockChecksum {
-		s.WriteString("BlockChecksum: true ")
-	}
-	if h.NoChecksum {
-		s.WriteString("NoChecksum: true ")
-	}
-	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
-		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
-	}
-	if l := h.CompressionLevel; l != 0 {
-		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
-	}
-	s.WriteByte('}')
-
-	return s.String()
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
deleted file mode 100644
index 12c761a..0000000
--- a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//+build !go1.10
-
-package lz4
-
-import (
-	"bytes"
-	"fmt"
-)
-
-func (h Header) String() string {
-	var s bytes.Buffer
-
-	s.WriteString(fmt.Sprintf("%T{", h))
-	if h.BlockChecksum {
-		s.WriteString("BlockChecksum: true ")
-	}
-	if h.NoChecksum {
-		s.WriteString("NoChecksum: true ")
-	}
-	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
-		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
-	}
-	if l := h.CompressionLevel; l != 0 {
-		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
-	}
-	s.WriteByte('}')
-
-	return s.String()
-}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
deleted file mode 100644
index 87dd72b..0000000
--- a/vendor/github.com/pierrec/lz4/reader.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package lz4
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-	"io/ioutil"
-
-	"github.com/pierrec/lz4/internal/xxh32"
-)
-
-// Reader implements the LZ4 frame decoder.
-// The Header is set after the first call to Read().
-// The Header may change between Read() calls in case of concatenated frames.
-type Reader struct {
-	Header
-	// Handler called when a block has been successfully read.
-	// It provides the number of bytes read.
-	OnBlockDone func(size int)
-
-	buf      [8]byte       // Scrap buffer.
-	pos      int64         // Current position in src.
-	src      io.Reader     // Source.
-	zdata    []byte        // Compressed data.
-	data     []byte        // Uncompressed data.
-	idx      int           // Index of unread bytes into data.
-	checksum xxh32.XXHZero // Frame hash.
-	skip     int64         // Bytes to skip before next read.
-	dpos     int64         // Position in dest
-}
-
-// NewReader returns a new LZ4 frame decoder.
-// No access to the underlying io.Reader is performed.
-func NewReader(src io.Reader) *Reader {
-	r := &Reader{src: src}
-	return r
-}
-
-// readHeader checks the frame magic number and parses the frame descriptor.
-// Skippable frames are supported even as a first frame, although the LZ4
-// specification recommends that skippable frames not be used as first frames.
-func (z *Reader) readHeader(first bool) error {
-	defer z.checksum.Reset()
-
-	buf := z.buf[:]
-	for {
-		magic, err := z.readUint32()
-		if err != nil {
-			z.pos += 4
-			if !first && err == io.ErrUnexpectedEOF {
-				return io.EOF
-			}
-			return err
-		}
-		if magic == frameMagic {
-			break
-		}
-		if magic>>8 != frameSkipMagic>>8 {
-			return ErrInvalid
-		}
-		skipSize, err := z.readUint32()
-		if err != nil {
-			return err
-		}
-		z.pos += 4
-		m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
-		if err != nil {
-			return err
-		}
-		z.pos += m
-	}
-
-	// Header.
-	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
-		return err
-	}
-	z.pos += 8
-
-	b := buf[0]
-	if v := b >> 6; v != Version {
-		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
-	}
-	if b>>5&1 == 0 {
-		return ErrBlockDependency
-	}
-	z.BlockChecksum = b>>4&1 > 0
-	frameSize := b>>3&1 > 0
-	z.NoChecksum = b>>2&1 == 0
-
-	bmsID := buf[1] >> 4 & 0x7
-	if bmsID < 4 || bmsID > 7 {
-		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
-	}
-	bSize := blockSizeIndexToValue(bmsID - 4)
-	z.BlockMaxSize = bSize
-
-	// Allocate the compressed/uncompressed buffers.
-	// The compressed buffer cannot exceed the uncompressed one.
-	if n := 2 * bSize; cap(z.zdata) < n {
-		z.zdata = make([]byte, n, n)
-	}
-	if debugFlag {
-		debug("header block max size id=%d size=%d", bmsID, bSize)
-	}
-	z.zdata = z.zdata[:bSize]
-	z.data = z.zdata[:cap(z.zdata)][bSize:]
-	z.idx = len(z.data)
-
-	_, _ = z.checksum.Write(buf[0:2])
-
-	if frameSize {
-		buf := buf[:8]
-		if _, err := io.ReadFull(z.src, buf); err != nil {
-			return err
-		}
-		z.Size = binary.LittleEndian.Uint64(buf)
-		z.pos += 8
-		_, _ = z.checksum.Write(buf)
-	}
-
-	// Header checksum.
-	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
-		return err
-	}
-	z.pos++
-	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
-		return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
-	}
-
-	z.Header.done = true
-	if debugFlag {
-		debug("header read: %v", z.Header)
-	}
-
-	return nil
-}
-
-// Read decompresses data from the underlying source into the supplied buffer.
-//
-// Since there can be multiple streams concatenated, Header values may
-// change between calls to Read(). If that is the case, no data is actually read from
-// the underlying io.Reader, to allow for potential input buffer resizing.
-func (z *Reader) Read(buf []byte) (int, error) {
-	if debugFlag {
-		debug("Read buf len=%d", len(buf))
-	}
-	if !z.Header.done {
-		if err := z.readHeader(true); err != nil {
-			return 0, err
-		}
-		if debugFlag {
-			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
-				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
-		}
-	}
-
-	if len(buf) == 0 {
-		return 0, nil
-	}
-
-	if z.idx == len(z.data) {
-		// No data ready for reading, process the next block.
-		if debugFlag {
-			debug("reading block from writer")
-		}
-		// Reset uncompressed buffer
-		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
-
-		// Block length: 0 = end of frame, highest bit set: uncompressed.
-		bLen, err := z.readUint32()
-		if err != nil {
-			return 0, err
-		}
-		z.pos += 4
-
-		if bLen == 0 {
-			// End of frame reached.
-			if !z.NoChecksum {
-				// Validate the frame checksum.
-				checksum, err := z.readUint32()
-				if err != nil {
-					return 0, err
-				}
-				if debugFlag {
-					debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
-				}
-				z.pos += 4
-				if h := z.checksum.Sum32(); checksum != h {
-					return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
-				}
-			}
-
-			// Get ready for the next concatenated frame and keep the position.
-			pos := z.pos
-			z.Reset(z.src)
-			z.pos = pos
-
-			// Since multiple frames can be concatenated, check for more.
-			return 0, z.readHeader(false)
-		}
-
-		if debugFlag {
-			debug("raw block size %d", bLen)
-		}
-		if bLen&compressedBlockFlag > 0 {
-			// Uncompressed block.
-			bLen &= compressedBlockMask
-			if debugFlag {
-				debug("uncompressed block size %d", bLen)
-			}
-			if int(bLen) > cap(z.data) {
-				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
-			}
-			z.data = z.data[:bLen]
-			if _, err := io.ReadFull(z.src, z.data); err != nil {
-				return 0, err
-			}
-			z.pos += int64(bLen)
-			if z.OnBlockDone != nil {
-				z.OnBlockDone(int(bLen))
-			}
-
-			if z.BlockChecksum {
-				checksum, err := z.readUint32()
-				if err != nil {
-					return 0, err
-				}
-				z.pos += 4
-
-				if h := xxh32.ChecksumZero(z.data); h != checksum {
-					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
-				}
-			}
-
-		} else {
-			// Compressed block.
-			if debugFlag {
-				debug("compressed block size %d", bLen)
-			}
-			if int(bLen) > cap(z.data) {
-				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
-			}
-			zdata := z.zdata[:bLen]
-			if _, err := io.ReadFull(z.src, zdata); err != nil {
-				return 0, err
-			}
-			z.pos += int64(bLen)
-
-			if z.BlockChecksum {
-				checksum, err := z.readUint32()
-				if err != nil {
-					return 0, err
-				}
-				z.pos += 4
-
-				if h := xxh32.ChecksumZero(zdata); h != checksum {
-					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
-				}
-			}
-
-			n, err := UncompressBlock(zdata, z.data)
-			if err != nil {
-				return 0, err
-			}
-			z.data = z.data[:n]
-			if z.OnBlockDone != nil {
-				z.OnBlockDone(n)
-			}
-		}
-
-		if !z.NoChecksum {
-			_, _ = z.checksum.Write(z.data)
-			if debugFlag {
-				debug("current frame checksum %x", z.checksum.Sum32())
-			}
-		}
-		z.idx = 0
-	}
-
-	if z.skip > int64(len(z.data[z.idx:])) {
-		z.skip -= int64(len(z.data[z.idx:]))
-		z.dpos += int64(len(z.data[z.idx:]))
-		z.idx = len(z.data)
-		return 0, nil
-	}
-
-	z.idx += int(z.skip)
-	z.dpos += z.skip
-	z.skip = 0
-
-	n := copy(buf, z.data[z.idx:])
-	z.idx += n
-	z.dpos += int64(n)
-	if debugFlag {
-		debug("copied %d bytes to input", n)
-	}
-
-	return n, nil
-}
-
-// Seek implements io.Seeker, but supports seeking forward from the current
-// position only. Any other seek will return an error. Allows skipping output
-// bytes which aren't needed, which in some scenarios is faster than reading
-// and discarding them.
-// Note this may cause future calls to Read() to read 0 bytes if all of the
-// data they would have returned is skipped.
-func (z *Reader) Seek(offset int64, whence int) (int64, error) {
-	if offset < 0 || whence != io.SeekCurrent {
-		return z.dpos + z.skip, ErrUnsupportedSeek
-	}
-	z.skip += offset
-	return z.dpos + z.skip, nil
-}
-
-// Reset discards the Reader's state and makes it equivalent to the
-// result of its original state from NewReader, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
-func (z *Reader) Reset(r io.Reader) {
-	z.Header = Header{}
-	z.pos = 0
-	z.src = r
-	z.zdata = z.zdata[:0]
-	z.data = z.data[:0]
-	z.idx = 0
-	z.checksum.Reset()
-}
-
-// readUint32 reads an uint32 into the supplied buffer.
-// The idea is to make use of the already allocated buffers avoiding additional allocations.
-func (z *Reader) readUint32() (uint32, error) {
-	buf := z.buf[:4]
-	_, err := io.ReadFull(z.src, buf)
-	x := binary.LittleEndian.Uint32(buf)
-	return x, err
-}
diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go
deleted file mode 100644
index 1670a77..0000000
--- a/vendor/github.com/pierrec/lz4/reader_legacy.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package lz4
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-// ReaderLegacy implements the LZ4Demo frame decoder.
-// The Header is set after the first call to Read().
-type ReaderLegacy struct {
-	Header
-	// Handler called when a block has been successfully read.
-	// It provides the number of bytes read.
-	OnBlockDone func(size int)
-
-	lastBlock bool
-	buf       [8]byte   // Scrap buffer.
-	pos       int64     // Current position in src.
-	src       io.Reader // Source.
-	zdata     []byte    // Compressed data.
-	data      []byte    // Uncompressed data.
-	idx       int       // Index of unread bytes into data.
-	skip      int64     // Bytes to skip before next read.
-	dpos      int64     // Position in dest
-}
-
-// NewReaderLegacy returns a new LZ4Demo frame decoder.
-// No access to the underlying io.Reader is performed.
-func NewReaderLegacy(src io.Reader) *ReaderLegacy {
-	r := &ReaderLegacy{src: src}
-	return r
-}
-
-// readLegacyHeader checks the frame magic number and parses the frame descriptor.
-// Skippable frames are supported even as a first frame, although the LZ4
-// specification recommends that skippable frames not be used as first frames.
-func (z *ReaderLegacy) readLegacyHeader() error {
-	z.lastBlock = false
-	magic, err := z.readUint32()
-	if err != nil {
-		z.pos += 4
-		if err == io.ErrUnexpectedEOF {
-			return io.EOF
-		}
-		return err
-	}
-	if magic != frameMagicLegacy {
-		return ErrInvalid
-	}
-	z.pos += 4
-
-	// Legacy has fixed 8MB blocksizes
-	// https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
-	bSize := blockSize4M * 2
-
-	// Allocate the compressed/uncompressed buffers.
-	// The compressed buffer cannot exceed the uncompressed one.
-	if n := 2 * bSize; cap(z.zdata) < n {
-		z.zdata = make([]byte, n, n)
-	}
-	if debugFlag {
-		debug("header block max size size=%d", bSize)
-	}
-	z.zdata = z.zdata[:bSize]
-	z.data = z.zdata[:cap(z.zdata)][bSize:]
-	z.idx = len(z.data)
-
-	z.Header.done = true
-	if debugFlag {
-		debug("header read: %v", z.Header)
-	}
-
-	return nil
-}
-
-// Read decompresses data from the underlying source into the supplied buffer.
-//
-// Since there can be multiple streams concatenated, Header values may
-// change between calls to Read(). If that is the case, no data is actually read from
-// the underlying io.Reader, to allow for potential input buffer resizing.
-func (z *ReaderLegacy) Read(buf []byte) (int, error) {
-	if debugFlag {
-		debug("Read buf len=%d", len(buf))
-	}
-	if !z.Header.done {
-		if err := z.readLegacyHeader(); err != nil {
-			return 0, err
-		}
-		if debugFlag {
-			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
-				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
-		}
-	}
-
-	if len(buf) == 0 {
-		return 0, nil
-	}
-
-	if z.idx == len(z.data) {
-		// No data ready for reading, process the next block.
-		if debugFlag {
-			debug("  reading block from writer %d %d", z.idx, blockSize4M*2)
-		}
-
-		// Reset uncompressed buffer
-		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
-
-		bLen, err := z.readUint32()
-		if err != nil {
-			return 0, err
-		}
-		if debugFlag {
-			debug("   bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos)
-		}
-		z.pos += 4
-
-		// Legacy blocks are always compressed, even when detrimental
-		if debugFlag {
-			debug("   compressed block size %d", bLen)
-		}
-
-		if int(bLen) > cap(z.data) {
-			return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
-		}
-		zdata := z.zdata[:bLen]
-		if _, err := io.ReadFull(z.src, zdata); err != nil {
-			return 0, err
-		}
-		z.pos += int64(bLen)
-
-		n, err := UncompressBlock(zdata, z.data)
-		if err != nil {
-			return 0, err
-		}
-
-		z.data = z.data[:n]
-		if z.OnBlockDone != nil {
-			z.OnBlockDone(n)
-		}
-
-		z.idx = 0
-
-		// Legacy blocks are fixed to 8MB, if we read a decompressed block smaller than this
-		// it means we've reached the end...
-		if n < blockSize4M*2 {
-			z.lastBlock = true
-		}
-	}
-
-	if z.skip > int64(len(z.data[z.idx:])) {
-		z.skip -= int64(len(z.data[z.idx:]))
-		z.dpos += int64(len(z.data[z.idx:]))
-		z.idx = len(z.data)
-		return 0, nil
-	}
-
-	z.idx += int(z.skip)
-	z.dpos += z.skip
-	z.skip = 0
-
-	n := copy(buf, z.data[z.idx:])
-	z.idx += n
-	z.dpos += int64(n)
-	if debugFlag {
-		debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data))
-	}
-	if z.lastBlock && len(z.data) == z.idx {
-		return n, io.EOF
-	}
-	return n, nil
-}
-
-// Seek implements io.Seeker, but supports seeking forward from the current
-// position only. Any other seek will return an error. Allows skipping output
-// bytes which aren't needed, which in some scenarios is faster than reading
-// and discarding them.
-// Note this may cause future calls to Read() to read 0 bytes if all of the
-// data they would have returned is skipped.
-func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) {
-	if offset < 0 || whence != io.SeekCurrent {
-		return z.dpos + z.skip, ErrUnsupportedSeek
-	}
-	z.skip += offset
-	return z.dpos + z.skip, nil
-}
-
-// Reset discards the Reader's state and makes it equivalent to the
-// result of its original state from NewReaderLegacy, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
-func (z *ReaderLegacy) Reset(r io.Reader) {
-	z.Header = Header{}
-	z.pos = 0
-	z.src = r
-	z.zdata = z.zdata[:0]
-	z.data = z.data[:0]
-	z.idx = 0
-}
-
-// readUint32 reads an uint32 into the supplied buffer.
-// The idea is to make use of the already allocated buffers avoiding additional allocations.
-func (z *ReaderLegacy) readUint32() (uint32, error) {
-	buf := z.buf[:4]
-	_, err := io.ReadFull(z.src, buf)
-	x := binary.LittleEndian.Uint32(buf)
-	return x, err
-}
diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore
new file mode 100644
index 0000000..5d7e88d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/.gitignore
@@ -0,0 +1,36 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+cmd/*/*exe
+.idea
+
+fuzz/*.zip
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE
similarity index 100%
rename from vendor/github.com/pierrec/lz4/LICENSE
rename to vendor/github.com/pierrec/lz4/v4/LICENSE
diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md
new file mode 100644
index 0000000..dee7754
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/README.md
@@ -0,0 +1,92 @@
+# lz4 : LZ4 compression in pure Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4)
+[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [one](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4/v4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/v4/cmd/lz4c@latest
+```
+
+Usage
+
+```
+Usage of lz4c:
+  -version
+        print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+	// Compress the input string.
+	_, _ = io.Copy(zw, r)
+	_ = zw.Close() // Make sure the writer is closed
+	_ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
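+
+The low level block functions can also be used directly when the frame format is not needed. A minimal sketch, assuming the v4 block API (`Compressor`, `CompressBlockBound`, `UncompressBlock`); error handling elided:
+
+```
+data := []byte("hello world hello world hello world")
+
+// Compress into a buffer sized with CompressBlockBound.
+var c lz4.Compressor
+buf := make([]byte, lz4.CompressBlockBound(len(data)))
+n, _ := c.CompressBlock(data, buf)
+compressed := buf[:n]
+
+// Uncompress into a buffer large enough for the original data.
+out := make([]byte, len(data))
+m, _ := lz4.UncompressBlock(compressed, out)
+fmt.Println(string(out[:m]))
+```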
+
+## Contributing
+
+Contributions are very welcome for bug fixes, performance improvements, and more!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go
new file mode 100644
index 0000000..8df0dc7
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go
@@ -0,0 +1,222 @@
+package lz4
+
+import (
+	"errors"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+type crState int
+
+const (
+	crStateInitial crState = iota
+	crStateReading 
+	crStateFlushing
+	crStateDone
+)
+
+type CompressingReader struct {
+	state crState
+	src io.ReadCloser // source reader
+	level lz4block.CompressionLevel // how hard to try
+	frame *lz4stream.Frame // frame being built
+	in []byte
+	out ovWriter
+	handler func(int)
+}
+
+// NewCompressingReader creates a reader which reads compressed data from a
+// raw source stream. This makes it the logical opposite of a normal lz4.Reader.
+// We require an io.ReadCloser as an underlying source for compatibility
+// with Go's http.Request.
+func NewCompressingReader(src io.ReadCloser) *CompressingReader {
+	zrd := &CompressingReader {
+		frame: lz4stream.NewFrame(),
+	}
+
+	_ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone)
+	zrd.Reset(src)
+
+	return zrd
+}
+
+// Source exposes the underlying source stream for introspection and control.
+func (zrd *CompressingReader) Source() io.ReadCloser {
+	return zrd.src
+}
+
+// Close simply invokes the underlying stream Close method. This method is
+// provided for the benefit of Go http client/server, which relies on Close
+// for goroutine termination.
+func (zrd *CompressingReader) Close() error {
+	return zrd.src.Close()
+}
+
+// Apply applies useful options to the lz4 encoder.
+func (zrd *CompressingReader) Apply(options ...Option) (err error) {
+	if zrd.state != crStateInitial {
+		return lz4errors.ErrOptionClosedOrError
+	}
+
+	zrd.Reset(zrd.src)
+
+	for _, o := range options {
+		if err = o(zrd); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (*CompressingReader) private() {}
+
+func (zrd *CompressingReader) init() error {
+	zrd.frame.InitW(&zrd.out, 1, false)
+	size := zrd.frame.Descriptor.Flags.BlockSizeIndex()
+	zrd.in = size.Get()
+	return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out)
+}
+
+// Read allows reading of lz4 compressed data
+func (zrd *CompressingReader) Read(p []byte) (n int, err error) {
+	defer func() {
+		if err != nil {
+			zrd.state = crStateDone
+		}
+	}()
+
+	if !zrd.out.reset(p) {
+		return len(p), nil
+	}
+
+	switch zrd.state {
+	case crStateInitial:
+		err = zrd.init()
+		if err != nil {
+			return
+		}
+		zrd.state = crStateReading
+	case crStateDone:
+		return 0, errors.New("This reader is done")
+	case crStateFlushing:
+		if zrd.out.dataPos > 0 {
+			n = zrd.out.dataPos
+			zrd.out.data = nil
+			zrd.out.dataPos = 0
+			return
+		} else {
+			zrd.state = crStateDone
+			return 0, io.EOF
+		}
+	}
+
+	for zrd.state == crStateReading {
+		block := zrd.frame.Blocks.Block
+
+		var rCount int
+		rCount, err = io.ReadFull(zrd.src, zrd.in)
+		switch err {
+		case nil:
+			err = block.Compress(
+				zrd.frame, zrd.in[ : rCount], zrd.level,
+			).Write(zrd.frame, &zrd.out)
+			zrd.handler(len(block.Data))
+			if err != nil {
+				return
+			}
+
+			if zrd.out.dataPos == len(zrd.out.data) {
+				n = zrd.out.dataPos
+				zrd.out.dataPos = 0
+				zrd.out.data = nil
+				return
+			}
+		case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+			if rCount > 0 {
+				err = block.Compress(
+					zrd.frame, zrd.in[ : rCount], zrd.level,
+				).Write(zrd.frame, &zrd.out)
+				zrd.handler(len(block.Data))
+				if err != nil {
+					return
+				}
+			}
+
+			err = zrd.frame.CloseW(&zrd.out, 1)
+			if err != nil {
+				return
+			}
+			zrd.state = crStateFlushing
+
+			n = zrd.out.dataPos
+			zrd.out.dataPos = 0
+			zrd.out.data = nil
+			return
+		default:
+			return
+		}
+	}
+
+	err = lz4errors.ErrInternalUnhandledState
+	return
+}
+
+// Reset makes the stream usable again; mostly handy to reuse lz4 encoder
+// instances.
+func (zrd *CompressingReader) Reset(src io.ReadCloser) {
+	zrd.frame.Reset(1)
+	zrd.state = crStateInitial
+	zrd.src = src
+	zrd.out.clear()
+}
+
+type ovWriter struct {
+	data []byte
+	ov []byte
+	dataPos int
+	ovPos int
+}
+
+func (wr *ovWriter) Write(p []byte) (n int, err error) {
+	count := copy(wr.data[wr.dataPos : ], p)
+	wr.dataPos += count
+
+	if count < len(p) {
+		wr.ov = append(wr.ov, p[count : ]...)
+	}
+
+	return len(p), nil
+}
+
+func (wr *ovWriter) reset(out []byte) bool {
+	ovRem := len(wr.ov) - wr.ovPos
+
+	if ovRem >= len(out) {
+		wr.ovPos += copy(out, wr.ov[wr.ovPos : ])
+		return false
+	}
+
+	if ovRem > 0 {
+		copy(out, wr.ov[wr.ovPos : ])
+		wr.ov = wr.ov[ : 0]
+		wr.ovPos = 0
+		wr.dataPos = ovRem
+	} else if wr.ovPos > 0 {
+		wr.ov = wr.ov[ : 0]
+		wr.ovPos = 0
+		wr.dataPos = 0
+	}
+
+	wr.data = out
+	return true
+}
+
+func (wr *ovWriter) clear() {
+	wr.data = nil
+	wr.dataPos = 0
+	wr.ov = wr.ov[ : 0]
+	wr.ovPos = 0
+}
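+
+// Usage note (illustrative sketch): a CompressingReader can serve as the body
+// of an outgoing HTTP request so that the payload is compressed on the fly,
+// assuming the receiving side understands lz4-framed data:
+//
+//	f, _ := os.Open("payload.json")
+//	zrd := lz4.NewCompressingReader(f)
+//	req, _ := http.NewRequest("POST", "https://example.com/upload", zrd)
+//	// The HTTP client reads compressed bytes from zrd and closes it when done.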
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
new file mode 100644
index 0000000..fec8adb
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
@@ -0,0 +1,481 @@
+package lz4block
+
+import (
+	"encoding/binary"
+	"math/bits"
+	"sync"
+
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+const (
+	// The following constants are used to setup the compression algorithm.
+	minMatch   = 4  // the minimum size of the match sequence size (4 bytes)
+	winSizeLog = 16 // LZ4 64Kb window size limit
+	winSize    = 1 << winSizeLog
+	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise for fast compression.
+	hashLog = 16
+	htSize  = 1 << hashLog
+
+	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
+)
+
+func recoverBlock(e *error) {
+	if r := recover(); r != nil && *e == nil {
+		*e = lz4errors.ErrInvalidSourceShortBuffer
+	}
+}
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+	const prime6bytes = 227718039650203
+	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
+func CompressBlockBound(n int) int {
+	return n + n/255 + 16
+}
+
+func UncompressBlock(src, dst, dict []byte) (int, error) {
+	if len(src) == 0 {
+		return 0, nil
+	}
+	if di := decodeBlock(dst, src, dict); di >= 0 {
+		return di, nil
+	}
+	return 0, lz4errors.ErrInvalidSourceShortBuffer
+}
+
+type Compressor struct {
+	// Offsets are at most 64kiB, so we can store only the lower 16 bits of
+	// match positions: effectively, an offset from some 64kiB block boundary.
+	//
+	// When we retrieve such an offset, we interpret it as relative to the last
+	// block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
+	// depending on which of these is inside the current window. If a table
+	// entry was generated more than 64kiB back in the input, we find out by
+	// inspecting the input stream.
+	table [htSize]uint16
+
+	// Bitmap indicating which positions in the table are in use.
+	// This allows us to quickly reset the table for reuse,
+	// without having to zero everything.
+	inUse [htSize / 32]uint32
+}
+
+// Get returns the position of a presumptive match for the hash h.
+// The match may be a false positive due to a hash collision or an old entry.
+// If si < winSize, the return value may be negative.
+func (c *Compressor) get(h uint32, si int) int {
+	h &= htSize - 1
+	i := 0
+	if c.inUse[h/32]&(1<<(h%32)) != 0 {
+		i = int(c.table[h])
+	}
+	i += si &^ winMask
+	if i >= si {
+		// Try previous 64kiB block (negative when in first block).
+		i -= winSize
+	}
+	return i
+}
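+
+// Illustrative walk-through of the scheme above: with si = 0x21234 and a
+// stored table entry of 0x9ABC, i starts as 0x20000 + 0x9ABC = 0x29ABC;
+// since that is not below si it must refer to the previous 64kiB block, so
+// winSize is subtracted and the candidate position becomes 0x19ABC.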
+
+func (c *Compressor) put(h uint32, si int) {
+	h &= htSize - 1
+	c.table[h] = uint16(si)
+	c.inUse[h/32] |= 1 << (h % 32)
+}
+
+func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} }
+
+var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}
+
+func CompressBlock(src, dst []byte) (int, error) {
+	c := compressorPool.Get().(*Compressor)
+	n, err := c.CompressBlock(src, dst)
+	compressorPool.Put(c)
+	return n, err
+}
+
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+	// Zero out reused table to avoid non-deterministic output (issue #65).
+	c.reset()
+
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	// si: Current position of the search.
+	// anchor: Position of the current literals.
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
+	for si < sn {
+		// Hash the next 6 bytes (sequence)...
+		match := binary.LittleEndian.Uint64(src[si:])
+		h := blockHash(match)
+		h2 := blockHash(match >> 8)
+
+		// We check a match at s, s+1 and s+2 and pick the first one we get.
+		// Checking 3 only requires us to load the source once.
+		ref := c.get(h, si)
+		ref2 := c.get(h2, si+1)
+		c.put(h, si)
+		c.put(h2, si+1)
+
+		offset := si - ref
+
+		if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
+			// No match. Start calculating another hash.
+			// The processor can usually do this out-of-order.
+			h = blockHash(match >> 16)
+			ref3 := c.get(h, si+2)
+
+			// Check the second match at si+1
+			si += 1
+			offset = si - ref2
+
+			if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+				// No match. Check the third match at si+2
+				si += 1
+				offset = si - ref3
+				c.put(h, si)
+
+				if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
+					// Skip one extra byte (at si+3) before we check 3 matches again.
+					si += 2 + (si-anchor)>>adaptSkipLog
+					continue
+				}
+			}
+		}
+
+		// Match found.
+		lLen := si - anchor // Literal length.
+		// We already matched 4 bytes.
+		mLen := 4
+
+		// Extend backwards if we can, reducing literals.
+		tOff := si - offset - 1
+		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+			si--
+			tOff--
+			lLen--
+			mLen++
+		}
+
+		// Add the match length, so we continue search at the end.
+		// Use mLen to store the offset base.
+		si, mLen = si+mLen, si+minMatch
+
+		// Find the longest match by looking by batches of 8 bytes.
+		for si+8 <= sn {
+			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+			if x == 0 {
+				si += 8
+			} else {
+				// Stop at the first non-zero byte.
+				si += bits.TrailingZeros64(x) >> 3
+				break
+			}
+		}
+
+		mLen = si - mLen
+		if di >= len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF && di < len(dst); l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			if di >= len(dst) {
+				return 0, lz4errors.ErrInvalidSourceShortBuffer
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		if di+lLen > len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen + 2
+		anchor = si
+
+		// Encode offset.
+		if di > len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			if di >= len(dst) {
+				return 0, lz4errors.ErrInvalidSourceShortBuffer
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+		// Check if we can load next values.
+		if si >= sn {
+			break
+		}
+		// Hash match end-2
+		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+		c.put(h, si-2)
+	}
+
+lastLiterals:
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	if di >= len(dst) {
+		return 0, lz4errors.ErrInvalidSourceShortBuffer
+	}
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		if di >= len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	if di+len(src)-anchor > len(dst) {
+		return 0, lz4errors.ErrInvalidSourceShortBuffer
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+	return x * hasher >> (32 - winSizeLog)
+}
+
+type CompressorHC struct {
+	// hashTable: stores the last position found for a given hash
+	// chainTable: stores previous positions for a given hash
+	hashTable, chainTable [htSize]int
+	needsReset            bool
+}
+
+var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}
+
+func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
+	c := compressorHCPool.Get().(*CompressorHC)
+	n, err := c.CompressBlock(src, dst, depth)
+	compressorHCPool.Put(c)
+	return n, err
+}
+
+func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
+	if c.needsReset {
+		// Zero out reused table to avoid non-deterministic output (issue #65).
+		c.hashTable = [htSize]int{}
+		c.chainTable = [htSize]int{}
+	}
+	c.needsReset = true // Only false on first call.
+
+	defer recoverBlock(&err)
+
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	if depth == 0 {
+		depth = winSize
+	}
+
+	for si < sn {
+		// Hash the next 4 bytes (sequence).
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHashHC(match)
+
+		// Follow the chain until out of window and give the longest match.
+		mLen := 0
+		offset := 0
+		for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
+			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+			// must match to improve on the match length.
+			if src[next+mLen] != src[si+mLen] {
+				continue
+			}
+			ml := 0
+			// Compare the current position with a previous with the same hash.
+			for ml < sn-si {
+				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+				if x == 0 {
+					ml += 8
+				} else {
+					// Stop at the first non-zero byte.
+					ml += bits.TrailingZeros64(x) >> 3
+					break
+				}
+			}
+			if ml < minMatch || ml <= mLen {
+				// Match too small (<minMatch) or smaller than the current match.
+				continue
+			}
+			// Found a longer match, keep its position and length.
+			mLen = ml
+			offset = si - next
+			// Try another previous position with the same hash.
+		}
+		c.chainTable[si&winMask] = c.hashTable[h]
+		c.hashTable[h] = si
+
+		// No match found.
+		if mLen == 0 {
+			si += 1 + (si-anchor)>>adaptSkipLog
+			continue
+		}
+
+		// Match found.
+		// Update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length.
+		winStart := si + 1
+		if ws := si + mLen - winSize; ws > winStart {
+			winStart = ws
+		}
+		for si, ml := winStart, si+mLen; si < ml; {
+			match >>= 8
+			match |= uint32(src[si+3]) << 24
+			h := blockHashHC(match)
+			c.chainTable[si&winMask] = c.hashTable[h]
+			c.hashTable[h] = si
+			si++
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // Match length does not include minMatch.
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen
+		anchor = si
+
+		// Encode offset.
+		di += 2
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+lastLiterals:
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
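+
+// Illustrative sketch of a round trip through the helpers above, from within
+// this package (error handling elided):
+//
+//	src := []byte("some data some data some data")
+//	dst := make([]byte, CompressBlockBound(len(src)))
+//	n, _ := CompressBlock(src, dst)
+//	out := make([]byte, len(src))
+//	m, _ := UncompressBlock(dst[:n], out, nil) // nil: no preset dictionary
+//	// out[:m] now equals src.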
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
new file mode 100644
index 0000000..138083d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
@@ -0,0 +1,87 @@
+// Package lz4block provides LZ4 BlockSize types and pools of buffers.
+package lz4block
+
+import "sync"
+
+const (
+	Block64Kb uint32 = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+	Block8Mb = 2 * Block4Mb
+)
+
+var (
+	BlockPool64K  = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
+	BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
+	BlockPool1M   = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
+	BlockPool4M   = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
+	BlockPool8M   = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }}
+)
+
+func Index(b uint32) BlockSizeIndex {
+	switch b {
+	case Block64Kb:
+		return 4
+	case Block256Kb:
+		return 5
+	case Block1Mb:
+		return 6
+	case Block4Mb:
+		return 7
+	case Block8Mb: // only valid in legacy mode
+		return 3
+	}
+	return 0
+}
+
+func IsValid(b uint32) bool {
+	return Index(b) > 0
+}
+
+type BlockSizeIndex uint8
+
+func (b BlockSizeIndex) IsValid() bool {
+	switch b {
+	case 4, 5, 6, 7:
+		return true
+	}
+	return false
+}
+
+func (b BlockSizeIndex) Get() []byte {
+	var buf interface{}
+	switch b {
+	case 4:
+		buf = BlockPool64K.Get()
+	case 5:
+		buf = BlockPool256K.Get()
+	case 6:
+		buf = BlockPool1M.Get()
+	case 7:
+		buf = BlockPool4M.Get()
+	case 3:
+		buf = BlockPool8M.Get()
+	}
+	return buf.([]byte)
+}
+
+func Put(buf []byte) {
+	// Safeguard: do not allow invalid buffers.
+	switch c := cap(buf); uint32(c) {
+	case Block64Kb:
+		BlockPool64K.Put(buf[:c])
+	case Block256Kb:
+		BlockPool256K.Put(buf[:c])
+	case Block1Mb:
+		BlockPool1M.Put(buf[:c])
+	case Block4Mb:
+		BlockPool4M.Put(buf[:c])
+	case Block8Mb:
+		BlockPool8M.Put(buf[:c])
+	}
+}
+
+type CompressionLevel uint32
+
+const Fast CompressionLevel = 0
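+
+// Illustrative sketch of the pools above: Index maps a block size to its
+// frame descriptor code (4..7, or 3 for the legacy 8Mb size), and the
+// resulting BlockSizeIndex borrows buffers from the matching pool:
+//
+//	idx := Index(Block64Kb) // == 4
+//	buf := idx.Get()        // 64Kb buffer taken from BlockPool64K
+//	// ... fill and use buf ...
+//	Put(buf)                // return it to the pool for reuse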
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
new file mode 100644
index 0000000..1d00133
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
@@ -0,0 +1,448 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// AX scratch
+// BX scratch
+// CX literal and match lengths
+// DX token, match offset
+//
+// DI &dst
+// SI &src
+// R8 &dst + len(dst)
+// R9 &src + len(src)
+// R11 &dst
+// R12 short output end
+// R13 short input end
+// R14 &dict
+// R15 len(dict)
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOSPLIT, $48-80
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, R11
+	MOVQ dst_len+8(FP), R8
+	ADDQ DI, R8
+
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R9
+	CMPQ R9, $0
+	JE   err_corrupt
+	ADDQ SI, R9
+
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+
+	// shortcut ends
+	// short output end
+	MOVQ R8, R12
+	SUBQ $32, R12
+	// short input end
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	XORL CX, CX
+
+loop:
+	// token := uint32(src[si])
+	MOVBLZX (SI), DX
+	INCQ SI
+
+	// lit_len = token >> 4
+	// if lit_len > 0
+	// CX = lit_len
+	MOVL DX, CX
+	SHRL $4, CX
+
+	// if lit_len != 0xF
+	CMPL CX, $0xF
+	JEQ  lit_len_loop
+	CMPQ DI, R12
+	JAE  copy_literal
+	CMPQ SI, R13
+	JAE  copy_literal
+
+	// copy shortcut
+
+	// A two-stage shortcut for the most common case:
+	// 1) If the literal length is 0..14, and there is enough space,
+	// enter the shortcut and copy 16 bytes on behalf of the literals
+	// (in the fast mode, only 8 bytes can be safely copied this way).
+	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
+	// manner; but we ensure that there's enough space in the output for
+	// those 18 bytes earlier, upon entering the shortcut (in other words,
+	// there is a combined check for both stages).
+
+	// copy literal
+	MOVOU (SI), X0
+	MOVOU X0, (DI)
+	ADDQ CX, DI
+	ADDQ CX, SI
+
+	MOVL DX, CX
+	ANDL $0xF, CX
+
+	// The second stage: prepare for match copying, decode full info.
+	// If it doesn't work out, the info won't be wasted.
+	// offset := uint16(data[:2])
+	MOVWLZX (SI), DX
+	TESTL DX, DX
+	JE err_corrupt
+	ADDQ $2, SI
+	JC err_short_buf
+
+	MOVQ DI, AX
+	SUBQ DX, AX
+	JC err_corrupt
+	CMPQ AX, DI
+	JA err_short_buf
+
+	// if we can't do the second stage then jump straight to read the
+	// match length, we already have the offset.
+	CMPL CX, $0xF
+	JEQ match_len_loop_pre
+	CMPL DX, $8
+	JLT match_len_loop_pre
+	CMPQ AX, R11
+	JB match_len_loop_pre
+
+	// memcpy(op + 0, match + 0, 8);
+	MOVQ (AX), BX
+	MOVQ BX, (DI)
+	// memcpy(op + 8, match + 8, 8);
+	MOVQ 8(AX), BX
+	MOVQ BX, 8(DI)
+	// memcpy(op +16, match +16, 2);
+	MOVW 16(AX), BX
+	MOVW BX, 16(DI)
+
+	LEAQ const_minMatch(DI)(CX*1), DI
+
+	// shortcut complete, load next token
+	JMP loopcheck
+
+	// Read the rest of the literal length:
+	// do { BX = src[si++]; lit_len += BX } while (BX == 0xFF).
+lit_len_loop:
+	CMPQ SI, R9
+	JAE err_short_buf
+
+	MOVBLZX (SI), BX
+	INCQ SI
+	ADDQ BX, CX
+
+	CMPB BX, $0xFF
+	JE lit_len_loop
+
+copy_literal:
+	// bounds check src and dst
+	MOVQ SI, AX
+	ADDQ CX, AX
+	JC err_short_buf
+	CMPQ AX, R9
+	JA err_short_buf
+
+	MOVQ DI, BX
+	ADDQ CX, BX
+	JC err_short_buf
+	CMPQ BX, R8
+	JA err_short_buf
+
+	// Copy literals of <=48 bytes through the XMM registers.
+	CMPQ CX, $48
+	JGT memmove_lit
+
+	// if len(dst[di:]) < 48
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $48
+	JLT memmove_lit
+
+	// if len(src[si:]) < 48
+	MOVQ R9, BX
+	SUBQ SI, BX
+	CMPQ BX, $48
+	JLT memmove_lit
+
+	MOVOU (SI), X0
+	MOVOU 16(SI), X1
+	MOVOU 32(SI), X2
+	MOVOU X0, (DI)
+	MOVOU X1, 16(DI)
+	MOVOU X2, 32(DI)
+
+	ADDQ CX, SI
+	ADDQ CX, DI
+
+	JMP finish_lit_copy
+
+memmove_lit:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+
+	// Spill registers. Increment SI, DI now so we don't need to save CX.
+	ADDQ CX, DI
+	ADDQ CX, SI
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVL DX, 40(SP)
+
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVL 40(SP), DX
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+finish_lit_copy:
+	// CX := mLen
+	// free up DX to use for offset
+	MOVL DX, CX
+	ANDL $0xF, CX
+
+	CMPQ SI, R9
+	JAE end
+
+	// offset
+	// si += 2
+	// DX := int(src[si-2]) | int(src[si-1])<<8
+	ADDQ $2, SI
+	JC err_short_buf
+	CMPQ SI, R9
+	JA err_short_buf
+	MOVWQZX -2(SI), DX
+
+	// 0 offset is invalid
+	TESTL DX, DX
+	JEQ   err_corrupt
+
+match_len_loop_pre:
+	// if mlen != 0xF
+	CMPB CX, $0xF
+	JNE copy_match
+
+	// do { BX = src[si++]; mlen += BX } while (BX == 0xFF).
+match_len_loop:
+	CMPQ SI, R9
+	JAE err_short_buf
+
+	MOVBLZX (SI), BX
+	INCQ SI
+	ADDQ BX, CX
+
+	CMPB BX, $0xFF
+	JE match_len_loop
+
+copy_match:
+	ADDQ $const_minMatch, CX
+
+	// check we have match_len bytes left in dst
+	// di+match_len < len(dst)
+	MOVQ DI, AX
+	ADDQ CX, AX
+	JC err_short_buf
+	CMPQ AX, R8
+	JA err_short_buf
+
+	// DX = offset
+	// CX = match_len
+	// BX = &dst + (di - offset)
+	MOVQ DI, BX
+	SUBQ DX, BX
+
+	// check BX is within dst
+	// if BX < &dst
+	JC copy_match_from_dict
+	CMPQ BX, R11
+	JBE copy_match_from_dict
+
+	// if offset + match_len < di
+	LEAQ (BX)(CX*1), AX
+	CMPQ DI, AX
+	JA copy_interior_match
+
+	// AX := len(dst[:di])
+	// MOVQ DI, AX
+	// SUBQ R11, AX
+
+	// copy 16 bytes at a time
+	// if di-offset < 16 copy 16-(di-offset) bytes to di
+	// then do the remaining
+
+copy_match_loop:
+	// for match_len >= 0
+	// dst[di] = dst[i]
+	// di++
+	// i++
+	MOVB (BX), AX
+	MOVB AX, (DI)
+	INCQ DI
+	INCQ BX
+	DECQ CX
+	JNZ copy_match_loop
+
+	JMP loopcheck
+
+copy_interior_match:
+	CMPQ CX, $16
+	JGT memmove_match
+
+	// if len(dst[di:]) < 16
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $16
+	JLT memmove_match
+
+	MOVOU (BX), X0
+	MOVOU X0, (DI)
+
+	ADDQ CX, DI
+	XORL CX, CX
+	JMP  loopcheck
+
+copy_match_from_dict:
+	// CX = match_len
+	// BX = &dst + (di - offset)
+
+	// AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
+	MOVQ R11, AX
+	SUBQ BX, AX
+
+	// BX = len(dict) - dict_bytes_available
+	MOVQ R15, BX
+	SUBQ AX, BX
+	JS err_short_dict
+
+	ADDQ R14, BX
+
+	// if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy
+	CMPQ CX, AX
+	JLT memmove_match
+
+	// The match stretches over the dictionary and our block
+	// 1) copy what comes from the dictionary
+	// AX = dict_bytes_available = copy_size
+	// BX = &dict_end - copy_size
+	// CX = match_len
+
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ AX, 16(SP)
+	// store extra stuff we want to recover
+	// spill
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 16(SP), AX // copy_size
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX // match_len
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	// di+=copy_size
+	ADDQ AX, DI
+
+	// 2) copy the rest from the current block
+	// CX = match_len - copy_size = rest_size
+	SUBQ AX, CX
+	MOVQ R11, BX
+
+	// check if we have a copy overlap
+	// AX = &dst + rest_size
+	MOVQ CX, AX
+	ADDQ BX, AX
+	// if &dst + rest_size > di, copy byte by byte
+	CMPQ AX, DI
+
+	JA copy_match_loop
+
+memmove_match:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ CX, 16(SP)
+
+	// Spill registers. Increment DI now so we don't need to save CX.
+	ADDQ CX, DI
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	XORL CX, CX
+
+loopcheck:
+	// for si < len(src)
+	CMPQ SI, R9
+	JB   loop
+
+end:
+	// Remaining length must be zero.
+	TESTQ CX, CX
+	JNE   err_corrupt
+
+	SUBQ R11, DI
+	MOVQ DI, ret+72(FP)
+	RET
+
+err_corrupt:
+	MOVQ $-1, ret+72(FP)
+	RET
+
+err_short_buf:
+	MOVQ $-2, ret+72(FP)
+	RET
+
+err_short_dict:
+	MOVQ $-3, ret+72(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
new file mode 100644
index 0000000..20b21fc
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
@@ -0,0 +1,231 @@
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst	R0
+#define dstorig	R1
+#define src	R2
+#define dstend	R3
+#define srcend	R4
+#define match	R5	// Match address.
+#define dictend	R6
+#define token	R7
+#define len	R8	// Literal and match lengths.
+#define offset	R7	// Match offset; overlaps with token.
+#define tmp1	R9
+#define tmp2	R11
+#define tmp3	R12
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40
+	MOVW dst_base  +0(FP), dst
+	MOVW dst_len   +4(FP), dstend
+	MOVW src_base +12(FP), src
+	MOVW src_len  +16(FP), srcend
+
+	CMP $0, srcend
+	BEQ shortSrc
+
+	ADD dst, dstend
+	ADD src, srcend
+
+	MOVW dst, dstorig
+
+loop:
+	// Read token. Extract literal length.
+	MOVBU.P 1(src), token
+	MOVW    token >> 4, len
+	CMP     $15, len
+	BNE     readLitlenDone
+
+readLitlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADD.S   tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readLitlenLoop
+
+readLitlenDone:
+	CMP $0, len
+	BEQ copyLiteralDone
+
+	// Bounds check dst+len and src+len.
+	ADD.S    dst, len, tmp1
+	ADD.CC.S src, len, tmp2
+	BCS      shortSrc
+	CMP      dstend, tmp1
+	//BHI    shortDst // Uncomment for distinct error codes.
+	CMP.LS   srcend, tmp2
+	BHI      shortSrc
+
+	// Copy literal.
+	CMP $4, len
+	BLO copyLiteralFinish
+
+	// Copy 0-3 bytes until src is aligned.
+	TST        $1, src
+	MOVBU.NE.P 1(src), tmp1
+	MOVB.NE.P  tmp1, 1(dst)
+	SUB.NE     $1, len
+
+	TST        $2, src
+	MOVHU.NE.P 2(src), tmp2
+	MOVB.NE.P  tmp2, 1(dst)
+	MOVW.NE    tmp2 >> 8, tmp1
+	MOVB.NE.P  tmp1, 1(dst)
+	SUB.NE     $2, len
+
+	B copyLiteralLoopCond
+
+copyLiteralLoop:
+	// Aligned load, unaligned write.
+	MOVW.P 4(src), tmp1
+	MOVW   tmp1 >>  8, tmp2
+	MOVB   tmp2, 1(dst)
+	MOVW   tmp1 >> 16, tmp3
+	MOVB   tmp3, 2(dst)
+	MOVW   tmp1 >> 24, tmp2
+	MOVB   tmp2, 3(dst)
+	MOVB.P tmp1, 4(dst)
+copyLiteralLoopCond:
+	// Loop until len-4 < 0.
+	SUB.S  $4, len
+	BPL    copyLiteralLoop
+
+copyLiteralFinish:
+	// Copy remaining 0-3 bytes.
+	// At this point, len may be < 0, but len&3 is still accurate.
+	TST       $1, len
+	MOVB.NE.P 1(src), tmp3
+	MOVB.NE.P tmp3, 1(dst)
+	TST       $2, len
+	MOVB.NE.P 2(src), tmp1
+	MOVB.NE.P tmp1, 2(dst)
+	MOVB.NE   -1(src), tmp2
+	MOVB.NE   tmp2, -1(dst)
+
+copyLiteralDone:
+	// Initial part of match length.
+	// This frees up the token register for reuse as offset.
+	AND $15, token, len
+
+	CMP src, srcend
+	BEQ end
+
+	// Read offset.
+	ADD.S $2, src
+	BCS   shortSrc
+	CMP   srcend, src
+	BHI   shortSrc
+	MOVBU -2(src), offset
+	MOVBU -1(src), tmp1
+	ORR.S tmp1 << 8, offset
+	BEQ   corrupt
+
+	// Read rest of match length.
+	CMP $15, len
+	BNE readMatchlenDone
+
+readMatchlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADD.S   tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readMatchlenLoop
+
+readMatchlenDone:
+	// Bounds check dst+len+minMatch.
+	ADD.S    dst, len, tmp1
+	ADD.CC.S $const_minMatch, tmp1
+	BCS      shortDst
+	CMP      dstend, tmp1
+	BHI      shortDst
+
+	RSB dst, offset, match
+	CMP dstorig, match
+	BGE copyMatch4
+
+	// match < dstorig means the match starts in the dictionary,
+	// at len(dict) - offset + (dst - dstorig).
+	MOVW dict_base+24(FP), match
+	MOVW dict_len +28(FP), dictend
+
+	ADD $const_minMatch, len
+
+	RSB   dst, dstorig, tmp1
+	RSB   dictend, offset, tmp2
+	ADD.S tmp2, tmp1
+	BMI   shortDict
+	ADD   match, dictend
+	ADD   tmp1, match
+
+copyDict:
+	MOVBU.P 1(match), tmp1
+	MOVB.P  tmp1, 1(dst)
+	SUB.S   $1, len
+	CMP.NE  match, dictend
+	BNE     copyDict
+
+	// If the match extends beyond the dictionary, the rest is at dstorig.
+	CMP  $0, len
+	BEQ  copyMatchDone
+	MOVW dstorig, match
+	B    copyMatch
+
+	// Copy a regular match.
+	// Since len+minMatch is at least four, we can do a 4× unrolled
+	// byte copy loop. Using MOVW instead of four byte loads is faster,
+	// but to remain portable we'd have to align match first, which is
+	// too expensive. By alternating loads and stores, we also handle
+	// the case offset < 4.
+copyMatch4:
+	SUB.S   $4, len
+	MOVBU.P 4(match), tmp1
+	MOVB.P  tmp1, 4(dst)
+	MOVBU   -3(match), tmp2
+	MOVB    tmp2, -3(dst)
+	MOVBU   -2(match), tmp3
+	MOVB    tmp3, -2(dst)
+	MOVBU   -1(match), tmp1
+	MOVB    tmp1, -1(dst)
+	BPL     copyMatch4
+
+	// Restore len, which is now negative.
+	ADD.S $4, len
+	BEQ   copyMatchDone
+
+copyMatch:
+	// Finish with a byte-at-a-time copy.
+	SUB.S   $1, len
+	MOVBU.P 1(match), tmp2
+	MOVB.P  tmp2, 1(dst)
+	BNE     copyMatch
+
+copyMatchDone:
+	CMP src, srcend
+	BNE loop
+
+end:
+	CMP  $0, len
+	BNE  corrupt
+	SUB  dstorig, dst, tmp1
+	MOVW tmp1, ret+36(FP)
+	RET
+
+	// The error cases have distinct labels so we can put different
+	// return codes here when debugging, or if the error returns need to
+	// be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+	MOVW $-1, tmp1
+	MOVW tmp1, ret+36(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
new file mode 100644
index 0000000..d2fe11b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
@@ -0,0 +1,241 @@
+// +build gc
+// +build !noasm
+
+// This implementation assumes that strict alignment checking is turned off.
+// The Go compiler makes the same assumption.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst		R0
+#define dstorig		R1
+#define src		R2
+#define dstend		R3
+#define dstend16	R4	// dstend - 16
+#define srcend		R5
+#define srcend16	R6	// srcend - 16
+#define match		R7	// Match address.
+#define dict		R8
+#define dictlen		R9
+#define dictend		R10
+#define token		R11
+#define len		R12	// Literal and match lengths.
+#define lenRem		R13
+#define offset		R14	// Match offset.
+#define tmp1		R15
+#define tmp2		R16
+#define tmp3		R17
+#define tmp4		R19
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80
+	LDP  dst_base+0(FP), (dst, dstend)
+	ADD  dst, dstend
+	MOVD dst, dstorig
+
+	LDP src_base+24(FP), (src, srcend)
+	CBZ srcend, shortSrc
+	ADD src, srcend
+
+	// dstend16 = max(dstend-16, 0) and similarly for srcend16.
+	SUBS $16, dstend, dstend16
+	CSEL LO, ZR, dstend16, dstend16
+	SUBS $16, srcend, srcend16
+	CSEL LO, ZR, srcend16, srcend16
+
+	LDP dict_base+48(FP), (dict, dictlen)
+	ADD dict, dictlen, dictend
+
+loop:
+	// Read token. Extract literal length.
+	MOVBU.P 1(src), token
+	LSR     $4, token, len
+	CMP     $15, len
+	BNE     readLitlenDone
+
+readLitlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADDS    tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readLitlenLoop
+
+readLitlenDone:
+	CBZ len, copyLiteralDone
+
+	// Bounds check dst+len and src+len.
+	ADDS dst, len, tmp1
+	BCS  shortSrc
+	ADDS src, len, tmp2
+	BCS  shortSrc
+	CMP  dstend, tmp1
+	BHI  shortDst
+	CMP  srcend, tmp2
+	BHI  shortSrc
+
+	// Copy literal.
+	SUBS $16, len
+	BLO  copyLiteralShort
+
+copyLiteralLoop:
+	LDP.P 16(src), (tmp1, tmp2)
+	STP.P (tmp1, tmp2), 16(dst)
+	SUBS  $16, len
+	BPL   copyLiteralLoop
+
+	// Copy (final part of) literal of length 0-15.
+	// If we have >=16 bytes left in src and dst, just copy 16 bytes.
+copyLiteralShort:
+	CMP  dstend16, dst
+	CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO).
+	BHS  copyLiteralShortEnd
+
+	AND $15, len
+
+	LDP (src), (tmp1, tmp2)
+	ADD len, src
+	STP (tmp1, tmp2), (dst)
+	ADD len, dst
+
+	B copyLiteralDone
+
+	// Safe but slow copy near the end of src, dst.
+copyLiteralShortEnd:
+	TBZ     $3, len, 3(PC)
+	MOVD.P  8(src), tmp1
+	MOVD.P  tmp1, 8(dst)
+	TBZ     $2, len, 3(PC)
+	MOVW.P  4(src), tmp2
+	MOVW.P  tmp2, 4(dst)
+	TBZ     $1, len, 3(PC)
+	MOVH.P  2(src), tmp3
+	MOVH.P  tmp3, 2(dst)
+	TBZ     $0, len, 3(PC)
+	MOVBU.P 1(src), tmp4
+	MOVB.P  tmp4, 1(dst)
+
+copyLiteralDone:
+	// Initial part of match length.
+	AND $15, token, len
+
+	CMP src, srcend
+	BEQ end
+
+	// Read offset.
+	ADDS  $2, src
+	BCS   shortSrc
+	CMP   srcend, src
+	BHI   shortSrc
+	MOVHU -2(src), offset
+	CBZ   offset, corrupt
+
+	// Read rest of match length.
+	CMP $15, len
+	BNE readMatchlenDone
+
+readMatchlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADDS    tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readMatchlenLoop
+
+readMatchlenDone:
+	ADD $const_minMatch, len
+
+	// Bounds check dst+len.
+	ADDS dst, len, tmp2
+	BCS  shortDst
+	CMP  dstend, tmp2
+	BHI  shortDst
+
+	SUB offset, dst, match
+	CMP dstorig, match
+	BHS copyMatchTry8
+
+	// match < dstorig means the match starts in the dictionary,
+	// at len(dict) - offset + (dst - dstorig).
+	SUB  dstorig, dst, tmp1
+	SUB  offset, dictlen, tmp2
+	ADDS tmp2, tmp1
+	BMI  shortDict
+	ADD  dict, tmp1, match
+
+copyDict:
+	MOVBU.P 1(match), tmp3
+	MOVB.P  tmp3, 1(dst)
+	SUBS    $1, len
+	CCMP    NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag.
+	BNE     copyDict
+
+	CBZ len, copyMatchDone
+
+	// If the match extends beyond the dictionary, the rest is at dstorig.
+	// Recompute the offset for the next check.
+	MOVD dstorig, match
+	SUB  dstorig, dst, offset
+
+copyMatchTry8:
+	// Copy doublewords if both len and offset are at least eight.
+	// A 16-at-a-time loop doesn't provide a further speedup.
+	CMP  $8, len
+	CCMP HS, offset, $8, $0
+	BLO  copyMatchTry4
+
+	AND    $7, len, lenRem
+	SUB    $8, len
+copyMatchLoop8:
+	MOVD.P 8(match), tmp1
+	MOVD.P tmp1, 8(dst)
+	SUBS   $8, len
+	BPL    copyMatchLoop8
+
+	MOVD (match)(len), tmp2 // match+len == match+lenRem-8.
+	ADD  lenRem, dst
+	MOVD $0, len
+	MOVD tmp2, -8(dst)
+	B    copyMatchDone
+
+copyMatchTry4:
+	// Copy words if both len and offset are at least four.
+	CMP  $4, len
+	CCMP HS, offset, $4, $0
+	BLO  copyMatchLoop1
+
+	MOVWU.P 4(match), tmp2
+	MOVWU.P tmp2, 4(dst)
+	SUBS    $4, len
+	BEQ     copyMatchDone
+
+copyMatchLoop1:
+	// Byte-at-a-time copy for small offsets <= 3.
+	MOVBU.P 1(match), tmp2
+	MOVB.P  tmp2, 1(dst)
+	SUBS    $1, len
+	BNE     copyMatchLoop1
+
+copyMatchDone:
+	CMP src, srcend
+	BNE loop
+
+end:
+	CBNZ len, corrupt
+	SUB  dstorig, dst, tmp1
+	MOVD tmp1, ret+72(FP)
+	RET
+
+	// The error cases have distinct labels so we can put different
+	// return codes here when debugging, or if the error returns need to
+	// be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+	MOVD $-1, tmp1
+	MOVD tmp1, ret+72(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
new file mode 100644
index 0000000..8d9023d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
@@ -0,0 +1,10 @@
+//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm
+// +build amd64 arm arm64
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package lz4block
+
+//go:noescape
+func decodeBlock(dst, src, dict []byte) int
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
new file mode 100644
index 0000000..9f568fb
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
@@ -0,0 +1,139 @@
+//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm
+// +build !amd64,!arm,!arm64 appengine !gc noasm
+
+package lz4block
+
+import (
+	"encoding/binary"
+)
+
+func decodeBlock(dst, src, dict []byte) (ret int) {
+	// Restrict capacities so we don't read or write out of bounds.
+	dst = dst[:len(dst):len(dst)]
+	src = src[:len(src):len(src)]
+
+	const hasError = -2
+
+	if len(src) == 0 {
+		return hasError
+	}
+
+	defer func() {
+		if recover() != nil {
+			ret = hasError
+		}
+	}()
+
+	var si, di uint
+	for si < uint(len(src)) {
+		// Literals and match lengths (token).
+		b := uint(src[si])
+		si++
+
+		// Literals.
+		if lLen := b >> 4; lLen > 0 {
+			switch {
+			case lLen < 0xF && si+16 < uint(len(src)):
+				// Shortcut 1
+				// if we have enough room in src and dst, and the literals length
+				// is small enough (0..14) then copy all 16 bytes, even if not all
+				// are part of the literals.
+				copy(dst[di:], src[si:si+16])
+				si += lLen
+				di += lLen
+				if mLen := b & 0xF; mLen < 0xF {
+					// Shortcut 2
+					// if the match length (4..18) fits within the literals, then copy
+					// all 18 bytes, even if not all are part of the literals.
+					mLen += 4
+					if offset := u16(src[si:]); mLen <= offset && offset < di {
+						i := di - offset
+						// The remaining buffer may not hold 18 bytes.
+						// See https://github.com/pierrec/lz4/issues/51.
+						if end := i + 18; end <= uint(len(dst)) {
+							copy(dst[di:], dst[i:end])
+							si += 2
+							di += mLen
+							continue
+						}
+					}
+				}
+			case lLen == 0xF:
+				for {
+					x := uint(src[si])
+					if lLen += x; int(lLen) < 0 {
+						return hasError
+					}
+					si++
+					if x != 0xFF {
+						break
+					}
+				}
+				fallthrough
+			default:
+				copy(dst[di:di+lLen], src[si:si+lLen])
+				si += lLen
+				di += lLen
+			}
+		}
+
+		mLen := b & 0xF
+		if si == uint(len(src)) && mLen == 0 {
+			break
+		} else if si >= uint(len(src)) {
+			return hasError
+		}
+
+		offset := u16(src[si:])
+		if offset == 0 {
+			return hasError
+		}
+		si += 2
+
+		// Match.
+		mLen += minMatch
+		if mLen == minMatch+0xF {
+			for {
+				x := uint(src[si])
+				if mLen += x; int(mLen) < 0 {
+					return hasError
+				}
+				si++
+				if x != 0xFF {
+					break
+				}
+			}
+		}
+
+		// Copy the match.
+		if di < offset {
+			// The match is beyond our block, meaning the first part
+			// is in the dictionary.
+			fromDict := dict[uint(len(dict))+di-offset:]
+			n := uint(copy(dst[di:di+mLen], fromDict))
+			di += n
+			if mLen -= n; mLen == 0 {
+				continue
+			}
+			// We copied n = offset-di bytes from the dictionary,
+			// then set di = di+n = offset, so the following code
+			// copies from dst[di-offset:] = dst[0:].
+		}
+
+		expanded := dst[di-offset:]
+		if mLen > offset {
+			// Efficiently copy the match dst[di-offset:di] into the dst slice.
+			bytesToCopy := offset * (mLen / offset)
+			for n := offset; n <= bytesToCopy+offset; n *= 2 {
+				copy(expanded[n:], expanded[:n])
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+		di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
+	}
+
+	return int(di)
+}
+
+func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) }
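Note: the match-copy branch in decodeBlock above expands an overlapping match by repeatedly doubling the already-written pattern. A standalone sketch of that doubling copy, using a hypothetical expandMatch helper (not part of the package) and assuming offset <= di as in the non-dictionary case above:

```go
package main

import "fmt"

// expandMatch replicates the pattern dst[di-offset:di] forward until the
// full match length mLen has been written, mirroring the loop above.
func expandMatch(dst []byte, di, offset, mLen uint) uint {
	expanded := dst[di-offset:]
	if mLen > offset {
		// Each pass doubles the amount of valid pattern data in place.
		bytesToCopy := offset * (mLen / offset)
		for n := offset; n <= bytesToCopy+offset; n *= 2 {
			copy(expanded[n:], expanded[:n])
		}
		di += bytesToCopy
		mLen -= bytesToCopy
	}
	return di + uint(copy(dst[di:di+mLen], expanded[:mLen]))
}

func main() {
	dst := make([]byte, 16)
	copy(dst, "ab") // two literal bytes already decoded
	di := expandMatch(dst, 2, 2, 10)
	fmt.Println(string(dst[:di])) // abababababab
}
```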
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
new file mode 100644
index 0000000..710ea42
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
@@ -0,0 +1,19 @@
+package lz4errors
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+	ErrInvalidSourceShortBuffer      Error = "lz4: invalid source or destination buffer too short"
+	ErrInvalidFrame                  Error = "lz4: bad magic number"
+	ErrInternalUnhandledState        Error = "lz4: unhandled state"
+	ErrInvalidHeaderChecksum         Error = "lz4: invalid header checksum"
+	ErrInvalidBlockChecksum          Error = "lz4: invalid block checksum"
+	ErrInvalidFrameChecksum          Error = "lz4: invalid frame checksum"
+	ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
+	ErrOptionClosedOrError           Error = "lz4: cannot apply options on closed or in error object"
+	ErrOptionInvalidBlockSize        Error = "lz4: invalid block size"
+	ErrOptionNotApplicable           Error = "lz4: option not applicable"
+	ErrWriterNotClosed               Error = "lz4: writer not closed"
+)
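Note: lz4errors.Error is a plain string type, so the sentinel values above stay comparable after being wrapped with %w (as lz4stream does below). A minimal standalone sketch with a local copy of the type:

```go
package main

import (
	"errors"
	"fmt"
)

// Error mirrors the lz4errors pattern: a string type whose value is the message.
type Error string

func (e Error) Error() string { return string(e) }

const ErrInvalidFrame Error = "lz4: bad magic number"

func main() {
	// Wrapping with %w keeps the sentinel matchable via errors.Is.
	err := fmt.Errorf("%w: got magic %#x", ErrInvalidFrame, 0x184d2205)
	fmt.Println(errors.Is(err, ErrInvalidFrame)) // true
	fmt.Println(err)
}
```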
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
new file mode 100644
index 0000000..04aaca8
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
@@ -0,0 +1,348 @@
+package lz4stream
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+type Blocks struct {
+	Block  *FrameDataBlock
+	Blocks chan chan *FrameDataBlock
+	mu     sync.Mutex
+	err    error
+}
+
+func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
+	if num == 1 {
+		b.Blocks = nil
+		b.Block = NewFrameDataBlock(f)
+		return
+	}
+	b.Block = nil
+	if cap(b.Blocks) != num {
+		b.Blocks = make(chan chan *FrameDataBlock, num)
+	}
+	// goroutine managing concurrent block compression goroutines.
+	go func() {
+		// Process next block compression item.
+		for c := range b.Blocks {
+			// Read the next compressed block result.
+			// Waiting here ensures that the blocks are output in the order they were sent.
+			// The incoming channel is always closed as it indicates to the caller that
+			// the block has been processed.
+			block := <-c
+			if block == nil {
+				// Notify the block compression routine that we are done with its result.
+				// This is used when a sentinel block is sent to terminate the compression.
+				close(c)
+				return
+			}
+			// Do not attempt to write the block upon any previous failure.
+			if b.err == nil {
+				// Write the block.
+				if err := block.Write(f, dst); err != nil {
+					// Keep the first error.
+					b.err = err
+					// All pending compression goroutines need to shut down, so we need to keep going.
+				}
+			}
+			close(c)
+		}
+	}()
+}
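Note: initW relies on a channel-of-channels so that blocks are written out in submission order even though the compression itself runs concurrently. A standalone sketch of that pattern (not the package code):

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	jobs := make(chan chan string, 4)
	done := make(chan struct{})

	// Collector: drains the inner channels in the order they were enqueued.
	go func() {
		defer close(done)
		for c := range jobs {
			fmt.Println(<-c) // blocks until this particular job has finished
		}
	}()

	var wg sync.WaitGroup
	for _, word := range []string{"alpha", "beta", "gamma"} {
		c := make(chan string, 1)
		jobs <- c // enqueue before starting the work, which fixes the output order
		wg.Add(1)
		go func(w string, c chan string) {
			defer wg.Done()
			c <- strings.ToUpper(w) // stands in for compressing one block
		}(word, c)
	}
	wg.Wait()
	close(jobs)
	<-done // prints ALPHA, BETA, GAMMA, always in that order
}
```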
+
+func (b *Blocks) close(f *Frame, num int) error {
+	if num == 1 {
+		if b.Block != nil {
+			b.Block.Close(f)
+		}
+		err := b.err
+		b.err = nil
+		return err
+	}
+	if b.Blocks == nil {
+		err := b.err
+		b.err = nil
+		return err
+	}
+	c := make(chan *FrameDataBlock)
+	b.Blocks <- c
+	c <- nil
+	<-c
+	err := b.err
+	b.err = nil
+	return err
+}
+
+// ErrorR returns any error set while uncompressing a stream.
+func (b *Blocks) ErrorR() error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.err
+}
+
+// initR returns a channel that streams the uncompressed blocks if in concurrent
+// mode and no error. When the channel is closed, check for any error with b.ErrorR.
+//
+// If not in concurrent mode, the uncompressed block is b.Block and the returned error
+// needs to be checked.
+func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
+	size := f.Descriptor.Flags.BlockSizeIndex()
+	if num == 1 {
+		b.Blocks = nil
+		b.Block = NewFrameDataBlock(f)
+		return nil, nil
+	}
+	b.Block = nil
+	blocks := make(chan chan []byte, num)
+	// data receives the uncompressed blocks.
+	data := make(chan []byte)
+	// Read blocks from the source sequentially
+	// and uncompress them concurrently.
+
+	// In legacy mode, accrue the uncompressed sizes in cum.
+	var cum uint32
+	go func() {
+		var cumx uint32
+		var err error
+		for b.ErrorR() == nil {
+			block := NewFrameDataBlock(f)
+			cumx, err = block.Read(f, src, 0)
+			if err != nil {
+				block.Close(f)
+				break
+			}
+			// Recheck for an error as reading may be slow and uncompressing is expensive.
+			if b.ErrorR() != nil {
+				block.Close(f)
+				break
+			}
+			c := make(chan []byte)
+			blocks <- c
+			go func() {
+				defer block.Close(f)
+				data, err := block.Uncompress(f, size.Get(), nil, false)
+				if err != nil {
+					b.closeR(err)
+					// Close the block channel to indicate an error.
+					close(c)
+				} else {
+					c <- data
+				}
+			}()
+		}
+		// End the collection loop and the data channel.
+		c := make(chan []byte)
+		blocks <- c
+		c <- nil // signal the collection loop that we are done
+		<-c      // wait for the collect loop to complete
+		if f.isLegacy() && cum == cumx {
+			err = io.EOF
+		}
+		b.closeR(err)
+		close(data)
+	}()
+	// Collect the uncompressed blocks and make them available
+	// on the returned channel.
+	go func(leg bool) {
+		defer close(blocks)
+		skipBlocks := false
+		for c := range blocks {
+			buf, ok := <-c
+			if !ok {
+				// A closed channel indicates an error.
+				// All remaining channels should be discarded.
+				skipBlocks = true
+				continue
+			}
+			if buf == nil {
+				// Signal to end the loop.
+				close(c)
+				return
+			}
+			if skipBlocks {
+				// A previous error has occurred, skipping remaining channels.
+				continue
+			}
+			// Perform checksum now as the blocks are received in order.
+			if f.Descriptor.Flags.ContentChecksum() {
+				_, _ = f.checksum.Write(buf)
+			}
+			if leg {
+				cum += uint32(len(buf))
+			}
+			data <- buf
+			close(c)
+		}
+	}(f.isLegacy())
+	return data, nil
+}
+
+// closeR safely sets the error on b if not already set.
+func (b *Blocks) closeR(err error) {
+	b.mu.Lock()
+	if b.err == nil {
+		b.err = err
+	}
+	b.mu.Unlock()
+}
+
+func NewFrameDataBlock(f *Frame) *FrameDataBlock {
+	buf := f.Descriptor.Flags.BlockSizeIndex().Get()
+	return &FrameDataBlock{Data: buf, data: buf}
+}
+
+type FrameDataBlock struct {
+	Size     DataBlockSize
+	Data     []byte // compressed or uncompressed data (.data or .src)
+	Checksum uint32
+	data     []byte // buffer for compressed data
+	src      []byte // uncompressed data
+	err      error  // used in concurrent mode
+}
+
+func (b *FrameDataBlock) Close(f *Frame) {
+	b.Size = 0
+	b.Checksum = 0
+	b.err = nil
+	if b.data != nil {
+		// Block was not already closed.
+		lz4block.Put(b.data)
+		b.Data = nil
+		b.data = nil
+		b.src = nil
+	}
+}
+
+// Block compression errors are ignored since the buffer is sized appropriately.
+func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
+	data := b.data
+	if f.isLegacy() {
+		data = data[:cap(data)]
+	} else {
+		data = data[:len(src)] // trigger the incompressible flag in CompressBlock
+	}
+	var n int
+	switch level {
+	case lz4block.Fast:
+		n, _ = lz4block.CompressBlock(src, data)
+	default:
+		n, _ = lz4block.CompressBlockHC(src, data, level)
+	}
+	if n == 0 {
+		b.Size.UncompressedSet(true)
+		b.Data = src
+	} else {
+		b.Size.UncompressedSet(false)
+		b.Data = data[:n]
+	}
+	b.Size.sizeSet(len(b.Data))
+	b.src = src // keep track of the source for content checksum
+
+	if f.Descriptor.Flags.BlockChecksum() {
+		b.Checksum = xxh32.ChecksumZero(b.Data)
+	}
+	return b
+}
+
+func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
+	// Write is called in the same order as blocks are compressed,
+	// so content checksum must be done here.
+	if f.Descriptor.Flags.ContentChecksum() {
+		_, _ = f.checksum.Write(b.src)
+	}
+	buf := f.buf[:]
+	binary.LittleEndian.PutUint32(buf, uint32(b.Size))
+	if _, err := dst.Write(buf[:4]); err != nil {
+		return err
+	}
+
+	if _, err := dst.Write(b.Data); err != nil {
+		return err
+	}
+
+	if b.Checksum == 0 {
+		return nil
+	}
+	binary.LittleEndian.PutUint32(buf, b.Checksum)
+	_, err := dst.Write(buf[:4])
+	return err
+}
+
+// Read updates b with the next block data, size and checksum if available.
+func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
+	x, err := f.readUint32(src)
+	if err != nil {
+		return 0, err
+	}
+	if f.isLegacy() {
+		switch x {
+		case frameMagicLegacy:
+			// Concatenated legacy frame.
+			return b.Read(f, src, cum)
+		case cum:
+			// This only works in non-concurrent mode; concurrent mode
+			// is handled separately.
+			// Linux kernel format appends the total uncompressed size at the end.
+			return 0, io.EOF
+		}
+	} else if x == 0 {
+		// Marker for end of stream.
+		return 0, io.EOF
+	}
+	b.Size = DataBlockSize(x)
+
+	size := b.Size.size()
+	if size > cap(b.data) {
+		return x, lz4errors.ErrOptionInvalidBlockSize
+	}
+	b.data = b.data[:size]
+	if _, err := io.ReadFull(src, b.data); err != nil {
+		return x, err
+	}
+	if f.Descriptor.Flags.BlockChecksum() {
+		sum, err := f.readUint32(src)
+		if err != nil {
+			return 0, err
+		}
+		b.Checksum = sum
+	}
+	return x, nil
+}
+
+func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) {
+	if b.Size.Uncompressed() {
+		n := copy(dst, b.data)
+		dst = dst[:n]
+	} else {
+		n, err := lz4block.UncompressBlock(b.data, dst, dict)
+		if err != nil {
+			return nil, err
+		}
+		dst = dst[:n]
+	}
+	if f.Descriptor.Flags.BlockChecksum() {
+		if c := xxh32.ChecksumZero(b.data); c != b.Checksum {
+			err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
+			return nil, err
+		}
+	}
+	if sum && f.Descriptor.Flags.ContentChecksum() {
+		_, _ = f.checksum.Write(dst)
+	}
+	return dst, nil
+}
+
+func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
+	if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
+		return
+	}
+	x = binary.LittleEndian.Uint32(f.buf[:4])
+	return
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
new file mode 100644
index 0000000..18192a9
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
@@ -0,0 +1,204 @@
+// Package lz4stream provides the types that support reading and writing LZ4 data streams.
+package lz4stream
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+//go:generate go run gen.go
+
+const (
+	frameMagic       uint32 = 0x184D2204
+	frameSkipMagic   uint32 = 0x184D2A50
+	frameMagicLegacy uint32 = 0x184C2102
+)
+
+func NewFrame() *Frame {
+	return &Frame{}
+}
+
+type Frame struct {
+	buf        [15]byte // frame descriptor needs at most 4 (magic) + 2 (flags) + 8 (size) + 1 (checksum) = 15 bytes
+	Magic      uint32
+	Descriptor FrameDescriptor
+	Blocks     Blocks
+	Checksum   uint32
+	checksum   xxh32.XXHZero
+}
+
+// Reset allows reusing the Frame.
+// The Descriptor configuration is not modified.
+func (f *Frame) Reset(num int) {
+	f.Magic = 0
+	f.Descriptor.Checksum = 0
+	f.Descriptor.ContentSize = 0
+	_ = f.Blocks.close(f, num)
+	f.Checksum = 0
+}
+
+func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
+	if legacy {
+		f.Magic = frameMagicLegacy
+		idx := lz4block.Index(lz4block.Block8Mb)
+		f.Descriptor.Flags.BlockSizeIndexSet(idx)
+	} else {
+		f.Magic = frameMagic
+		f.Descriptor.initW()
+	}
+	f.Blocks.initW(f, dst, num)
+	f.checksum.Reset()
+}
+
+func (f *Frame) CloseW(dst io.Writer, num int) error {
+	if err := f.Blocks.close(f, num); err != nil {
+		return err
+	}
+	if f.isLegacy() {
+		return nil
+	}
+	buf := f.buf[:0]
+	// End mark (data block size of uint32(0)).
+	buf = append(buf, 0, 0, 0, 0)
+	if f.Descriptor.Flags.ContentChecksum() {
+		buf = f.checksum.Sum(buf)
+	}
+	_, err := dst.Write(buf)
+	return err
+}
+
+func (f *Frame) isLegacy() bool {
+	return f.Magic == frameMagicLegacy
+}
+
+func (f *Frame) ParseHeaders(src io.Reader) error {
+	if f.Magic > 0 {
+		// Header already read.
+		return nil
+	}
+
+newFrame:
+	var err error
+	if f.Magic, err = f.readUint32(src); err != nil {
+		return err
+	}
+	switch m := f.Magic; {
+	case m == frameMagic || m == frameMagicLegacy:
+	// All 16 values of frameSkipMagic are valid.
+	case m>>8 == frameSkipMagic>>8:
+		skip, err := f.readUint32(src)
+		if err != nil {
+			return err
+		}
+		if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
+			return err
+		}
+		goto newFrame
+	default:
+		return lz4errors.ErrInvalidFrame
+	}
+	if err := f.Descriptor.initR(f, src); err != nil {
+		return err
+	}
+	f.checksum.Reset()
+	return nil
+}
+
+func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
+	return f.Blocks.initR(f, num, src)
+}
+
+func (f *Frame) CloseR(src io.Reader) (err error) {
+	if f.isLegacy() {
+		return nil
+	}
+	if !f.Descriptor.Flags.ContentChecksum() {
+		return nil
+	}
+	if f.Checksum, err = f.readUint32(src); err != nil {
+		return err
+	}
+	if c := f.checksum.Sum32(); c != f.Checksum {
+		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
+	}
+	return nil
+}
+
+type FrameDescriptor struct {
+	Flags       DescriptorFlags
+	ContentSize uint64
+	Checksum    uint8
+}
+
+func (fd *FrameDescriptor) initW() {
+	fd.Flags.VersionSet(1)
+	fd.Flags.BlockIndependenceSet(true)
+}
+
+func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
+	if fd.Checksum > 0 {
+		// Header already written.
+		return nil
+	}
+
+	buf := f.buf[:4]
+	// Write the magic number here even though it belongs to the Frame.
+	binary.LittleEndian.PutUint32(buf, f.Magic)
+	if !f.isLegacy() {
+		buf = buf[:4+2]
+		binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))
+
+		if fd.Flags.Size() {
+			buf = buf[:4+2+8]
+			binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
+		}
+		fd.Checksum = descriptorChecksum(buf[4:])
+		buf = append(buf, fd.Checksum)
+	}
+
+	_, err := dst.Write(buf)
+	return err
+}
+
+func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
+	if f.isLegacy() {
+		idx := lz4block.Index(lz4block.Block8Mb)
+		f.Descriptor.Flags.BlockSizeIndexSet(idx)
+		return nil
+	}
+	// Read the flags and the checksum, hoping that there is no content size.
+	buf := f.buf[:3]
+	if _, err := io.ReadFull(src, buf); err != nil {
+		return err
+	}
+	descr := binary.LittleEndian.Uint16(buf)
+	fd.Flags = DescriptorFlags(descr)
+	if fd.Flags.Size() {
+		// Append the 8 missing bytes.
+		buf = buf[:3+8]
+		if _, err := io.ReadFull(src, buf[3:]); err != nil {
+			return err
+		}
+		fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
+	}
+	fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
+	buf = buf[:len(buf)-1]        // all descriptor fields except checksum
+	if c := descriptorChecksum(buf); fd.Checksum != c {
+		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
+	}
+	// Validate the fields that can be validated.
+	if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
+		return lz4errors.ErrOptionInvalidBlockSize
+	}
+	return nil
+}
+
+func descriptorChecksum(buf []byte) byte {
+	return byte(xxh32.ChecksumZero(buf) >> 8)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
new file mode 100644
index 0000000..d33a6be
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
@@ -0,0 +1,103 @@
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+package lz4stream
+
+import "github.com/pierrec/lz4/v4/internal/lz4block"
+
+// DescriptorFlags is defined as follows:
+//   field              bits
+//   -----              ----
+//   _                  2
+//   ContentChecksum    1
+//   Size               1
+//   BlockChecksum      1
+//   BlockIndependence  1
+//   Version            2
+//   _                  4
+//   BlockSizeIndex     3
+//   _                  1
+type DescriptorFlags uint16
+
+// Getters.
+func (x DescriptorFlags) ContentChecksum() bool   { return x>>2&1 != 0 }
+func (x DescriptorFlags) Size() bool              { return x>>3&1 != 0 }
+func (x DescriptorFlags) BlockChecksum() bool     { return x>>4&1 != 0 }
+func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
+func (x DescriptorFlags) Version() uint16         { return uint16(x >> 6 & 0x3) }
+func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex {
+	return lz4block.BlockSizeIndex(x >> 12 & 0x7)
+}
+
+// Setters.
+func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags {
+	const b = 1 << 2
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags {
+	const b = 1 << 3
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags {
+	const b = 1 << 4
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags {
+	const b = 1 << 5
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags {
+	*x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6)
+	return x
+}
+func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags {
+	*x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12)
+	return x
+}
+
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+// DataBlockSize is defined as follows:
+//   field         bits
+//   -----         ----
+//   size          31
+//   Uncompressed  1
+type DataBlockSize uint32
+
+// Getters.
+func (x DataBlockSize) size() int          { return int(x & 0x7FFFFFFF) }
+func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }
+
+// Setters.
+func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
+	*x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
+	return x
+}
+func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
+	const b = 1 << 31
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
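Note: the generated accessors above pack the frame descriptor's FLG and BD bytes into one little-endian uint16. A standalone sketch (hypothetical main package, duplicating a few of the getters) showing the mapping for a typical descriptor value, where 0x7064 corresponds to FLG=0x64 and BD=0x70: version 1, block independence, content checksum, block-size index 7 (4 MB):

```go
package main

import "fmt"

type DescriptorFlags uint16

func (x DescriptorFlags) ContentChecksum() bool   { return x>>2&1 != 0 }
func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
func (x DescriptorFlags) Version() uint16         { return uint16(x >> 6 & 0x3) }
func (x DescriptorFlags) BlockSizeIndex() uint16  { return uint16(x >> 12 & 0x7) }

func main() {
	f := DescriptorFlags(0x7064)
	fmt.Println(f.Version(), f.BlockIndependence(), f.ContentChecksum(), f.BlockSizeIndex())
	// Output: 1 true true 7
}
```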
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
new file mode 100644
index 0000000..651d10c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,212 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32-bit version).
+// (ported from the reference implementation https://github.com/Cyan4973/xxHash/)
+package xxh32
+
+import (
+	"encoding/binary"
+)
+
+const (
+	prime1 uint32 = 2654435761
+	prime2 uint32 = 2246822519
+	prime3 uint32 = 3266489917
+	prime4 uint32 = 668265263
+	prime5 uint32 = 374761393
+
+	primeMask   = 0xFFFFFFFF
+	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+	v        [4]uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+	xxh.v[0] = prime1plus2
+	xxh.v[1] = prime2
+	xxh.v[2] = 0
+	xxh.v[3] = prime1minus
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+	return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+	if xxh.totalLen == 0 {
+		xxh.Reset()
+	}
+	n := len(input)
+	m := xxh.bufused
+
+	xxh.totalLen += uint64(n)
+
+	r := len(xxh.buf) - m
+	if n < r {
+		copy(xxh.buf[m:], input)
+		xxh.bufused += len(input)
+		return n, nil
+	}
+
+	var buf *[16]byte
+	if m != 0 {
+		// some data left from previous update
+		buf = &xxh.buf
+		c := copy(buf[m:], input)
+		n -= c
+		input = input[c:]
+	}
+	update(&xxh.v, buf, input)
+	xxh.bufused = copy(xxh.buf[:], input[n-n%16:])
+
+	return n, nil
+}
+
+// Portable version of update. This updates v by processing all of buf
+// (if not nil) and all full 16-byte blocks of input.
+func updateGo(v *[4]uint32, buf *[16]byte, input []byte) {
+	// Causes compiler to work directly from registers instead of stack:
+	v1, v2, v3, v4 := v[0], v[1], v[2], v[3]
+
+	if buf != nil {
+		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
+	}
+
+	for ; len(input) >= 16; input = input[16:] {
+		sub := input[:16] // BCE hint for compiler
+		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+	}
+	v[0], v[1], v[2], v[3] = v1, v2, v3, v4
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+	h32 := uint32(xxh.totalLen)
+	if h32 >= 16 {
+		h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3])
+	} else {
+		h32 += prime5
+	}
+
+	p := 0
+	n := xxh.bufused
+	buf := xxh.buf
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for ; p < n; p++ {
+		h32 += uint32(buf[p]) * prime5
+		h32 = rol11(h32) * prime1
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// Portable version of ChecksumZero.
+func checksumZeroGo(input []byte) uint32 {
+	n := len(input)
+	h32 := uint32(n)
+
+	if n < 16 {
+		h32 += prime5
+	} else {
+		v1 := prime1plus2
+		v2 := prime2
+		v3 := uint32(0)
+		v4 := prime1minus
+		p := 0
+		for n := n - 16; p <= n; p += 16 {
+			sub := input[p:][:16] // BCE hint for compiler
+			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+		}
+		input = input[p:]
+		n -= p
+		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+	}
+
+	p := 0
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for p < n {
+		h32 += uint32(input[p]) * prime5
+		h32 = rol11(h32) * prime1
+		p++
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+func rol1(u uint32) uint32 {
+	return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+	return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+	return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+	return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+	return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+	return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+	return u<<18 | u>>14
+}
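Note: the streaming XXHZero and the one-shot ChecksumZero defined above are meant to agree. Since this is an internal package, a check like the following would have to live inside the package itself; a sketch of such a test:

```go
package xxh32

import "testing"

// TestStreamingMatchesOneShot is a sketch: feed the same data through the
// streaming hasher and the one-shot function and expect identical sums.
func TestStreamingMatchesOneShot(t *testing.T) {
	data := []byte("The quick brown fox jumps over the lazy dog")

	var h XXHZero
	if _, err := h.Write(data); err != nil {
		t.Fatal(err)
	}
	if got, want := h.Sum32(), ChecksumZero(data); got != want {
		t.Fatalf("streaming sum %#x != one-shot sum %#x", got, want)
	}
}
```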
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
new file mode 100644
index 0000000..0978b26
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
@@ -0,0 +1,11 @@
+// +build !noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+//
+//go:noescape
+func ChecksumZero(input []byte) uint32
+
+//go:noescape
+func update(v *[4]uint32, buf *[16]byte, input []byte)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
new file mode 100644
index 0000000..c18ffd5
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
@@ -0,0 +1,251 @@
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define p	R0
+#define n	R1
+#define h	R2
+#define v1	R2	// Alias for h.
+#define v2	R3
+#define v3	R4
+#define v4	R5
+#define x1	R6
+#define x2	R7
+#define x3	R8
+#define x4	R9
+
+// We need the primes in registers. The 16-byte loop only uses prime{1,2}.
+#define prime1r	R11
+#define prime2r	R12
+#define prime3r	R3	// The rest can alias v{2-4}.
+#define prime4r	R4
+#define prime5r	R5
+
+// Update round macros. These read from and increment p.
+
+#define round16aligned			\
+	MOVM.IA.W (p), [x1, x2, x3, x4]	\
+					\
+	MULA x1, prime2r, v1, v1	\
+	MULA x2, prime2r, v2, v2	\
+	MULA x3, prime2r, v3, v3	\
+	MULA x4, prime2r, v4, v4	\
+					\
+	MOVW v1 @> 19, v1		\
+	MOVW v2 @> 19, v2		\
+	MOVW v3 @> 19, v3		\
+	MOVW v4 @> 19, v4		\
+					\
+	MUL prime1r, v1			\
+	MUL prime1r, v2			\
+	MUL prime1r, v3			\
+	MUL prime1r, v4			\
+
+#define round16unaligned 		\
+	MOVBU.P  16(p), x1		\
+	MOVBU   -15(p), x2		\
+	ORR     x2 <<  8, x1		\
+	MOVBU   -14(p), x3		\
+	MOVBU   -13(p), x4		\
+	ORR     x4 <<  8, x3		\
+	ORR     x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v1, v1	\
+	MOVW v1 @> 19, v1		\
+	MUL prime1r, v1			\
+					\
+	MOVBU -12(p), x1		\
+	MOVBU -11(p), x2		\
+	ORR   x2 <<  8, x1		\
+	MOVBU -10(p), x3		\
+	MOVBU  -9(p), x4		\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v2, v2	\
+	MOVW v2 @> 19, v2		\
+	MUL prime1r, v2			\
+					\
+	MOVBU -8(p), x1			\
+	MOVBU -7(p), x2			\
+	ORR   x2 <<  8, x1		\
+	MOVBU -6(p), x3			\
+	MOVBU -5(p), x4			\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v3, v3	\
+	MOVW v3 @> 19, v3		\
+	MUL prime1r, v3			\
+					\
+	MOVBU -4(p), x1			\
+	MOVBU -3(p), x2			\
+	ORR   x2 <<  8, x1		\
+	MOVBU -2(p), x3			\
+	MOVBU -1(p), x4			\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v4, v4	\
+	MOVW v4 @> 19, v4		\
+	MUL prime1r, v4			\
+
+
+// func ChecksumZero([]byte) uint32
+TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16
+	MOVW input_base+0(FP), p
+	MOVW input_len+4(FP),  n
+
+	MOVW $const_prime1, prime1r
+	MOVW $const_prime2, prime2r
+
+	// Set up h for n < 16. It's tempting to say {ADD prime5, n, h}
+	// here, but that's a pseudo-op that generates a load through R11.
+	MOVW $const_prime5, prime5r
+	ADD  prime5r, n, h
+	CMP  $0, n
+	BEQ  end
+
+	// We let n go negative so we can do comparisons with SUB.S
+	// instead of separate CMP.
+	SUB.S $16, n
+	BMI   loop16done
+
+	ADD  prime1r, prime2r, v1
+	MOVW prime2r, v2
+	MOVW $0, v3
+	RSB  $0, prime1r, v4
+
+	TST $3, p
+	BNE loop16unaligned
+
+loop16aligned:
+	SUB.S $16, n
+	round16aligned
+	BPL loop16aligned
+	B   loop16finish
+
+loop16unaligned:
+	SUB.S $16, n
+	round16unaligned
+	BPL loop16unaligned
+
+loop16finish:
+	MOVW v1 @> 31, h
+	ADD  v2 @> 25, h
+	ADD  v3 @> 20, h
+	ADD  v4 @> 14, h
+
+	// h += len(input) with v2 as temporary.
+	MOVW input_len+4(FP), v2
+	ADD  v2, h
+
+loop16done:
+	ADD $16, n	// Restore number of bytes left.
+
+	SUB.S $4, n
+	MOVW  $const_prime3, prime3r
+	BMI   loop4done
+	MOVW  $const_prime4, prime4r
+
+	TST $3, p
+	BNE loop4unaligned
+
+loop4aligned:
+	SUB.S $4, n
+
+	MOVW.P 4(p), x1
+	MULA   prime3r, x1, h, h
+	MOVW   h @> 15, h
+	MUL    prime4r, h
+
+	BPL loop4aligned
+	B   loop4done
+
+loop4unaligned:
+	SUB.S $4, n
+
+	MOVBU.P  4(p), x1
+	MOVBU   -3(p), x2
+	ORR     x2 <<  8, x1
+	MOVBU   -2(p), x3
+	ORR     x3 << 16, x1
+	MOVBU   -1(p), x4
+	ORR     x4 << 24, x1
+
+	MULA prime3r, x1, h, h
+	MOVW h @> 15, h
+	MUL  prime4r, h
+
+	BPL loop4unaligned
+
+loop4done:
+	ADD.S $4, n	// Restore number of bytes left.
+	BEQ   end
+
+	MOVW $const_prime5, prime5r
+
+loop1:
+	SUB.S $1, n
+
+	MOVBU.P 1(p), x1
+	MULA    prime5r, x1, h, h
+	MOVW    h @> 21, h
+	MUL     prime1r, h
+
+	BNE loop1
+
+end:
+	MOVW $const_prime3, prime3r
+	EOR  h >> 15, h
+	MUL  prime2r, h
+	EOR  h >> 13, h
+	MUL  prime3r, h
+	EOR  h >> 16, h
+
+	MOVW h, ret+12(FP)
+	RET
+
+
+// func update(v *[4]uint32, buf *[16]byte, input []byte)
+TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20
+	MOVW    v+0(FP), p
+	MOVM.IA (p), [v1, v2, v3, v4]
+
+	MOVW $const_prime1, prime1r
+	MOVW $const_prime2, prime2r
+
+	// Process buf, if not nil.
+	MOVW buf+4(FP), p
+	CMP  $0, p
+	BEQ  noBuffered
+
+	round16aligned
+
+noBuffered:
+	MOVW input_base +8(FP), p
+	MOVW input_len +12(FP), n
+
+	SUB.S $16, n
+	BMI   end
+
+	TST $3, p
+	BNE loop16unaligned
+
+loop16aligned:
+	SUB.S $16, n
+	round16aligned
+	BPL loop16aligned
+	B   end
+
+loop16unaligned:
+	SUB.S $16, n
+	round16unaligned
+	BPL loop16unaligned
+
+end:
+	MOVW    v+0(FP), p
+	MOVM.IA [v1, v2, v3, v4], (p)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
new file mode 100644
index 0000000..c96b59b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
@@ -0,0 +1,10 @@
+// +build !arm noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) }
+
+func update(v *[4]uint32, buf *[16]byte, input []byte) {
+	updateGo(v, buf, input)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go
new file mode 100644
index 0000000..a62022e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/lz4.go
@@ -0,0 +1,157 @@
+// Package lz4 implements reading and writing lz4 compressed data.
+//
+// The package supports both the LZ4 stream format,
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// and the LZ4 block format, defined at
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html.
+//
+// See https://github.com/lz4/lz4 for the reference C implementation.
+package lz4
+
+import (
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+func _() {
+	// Safety checks for duplicated elements.
+	var x [1]struct{}
+	_ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
+	_ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
+	_ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
+	_ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
+	_ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
+}
+
+// CompressBlockBound returns the maximum compressed size of a buffer of size n when the data is not compressible.
+func CompressBlockBound(n int) int {
+	return lz4block.CompressBlockBound(n)
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+	return lz4block.UncompressBlock(src, dst, nil)
+}
+
+// UncompressBlockWithDict uncompresses the source buffer into the destination one using a
+// dictionary, and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlockWithDict(src, dst, dict []byte) (int, error) {
+	return lz4block.UncompressBlock(src, dst, dict)
+}
+
+// A Compressor compresses data into the LZ4 block format.
+// It uses a fast compression algorithm.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type Compressor struct{ c lz4block.Compressor }
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst)
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The argument hashTable is scratch space for a hash table used by the
+// compressor. If provided, it should have length at least 1<<16. If it is
+// shorter (or nil), CompressBlock allocates its own hash table.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+
+// CompressBlock is equivalent to Compressor.CompressBlock.
+// The final argument is ignored and should be set to nil.
+//
+// This function is deprecated. Use a Compressor instead.
+func CompressBlock(src, dst []byte, _ []int) (int, error) {
+	return lz4block.CompressBlock(src, dst)
+}
+
+// A CompressorHC compresses data into the LZ4 block format.
+// Its compression ratio is potentially better than that of a Compressor,
+// but it is also slower and requires more memory.
+//
+// A CompressorHC is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type CompressorHC struct {
+	// Level is the maximum search depth for compression.
+	// Values <= 0 mean no maximum.
+	Level CompressionLevel
+	c     lz4block.CompressorHC
+}
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
+}
+
+// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
+// The final two arguments are ignored and should be set to nil.
+//
+// This function is deprecated. Use a CompressorHC instead.
+func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
+	return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
+}
+
+const (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
+	// ErrInvalidFrame is returned when reading an invalid LZ4 archive.
+	ErrInvalidFrame = lz4errors.ErrInvalidFrame
+	// ErrInternalUnhandledState is an internal error.
+	ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
+	// ErrInvalidHeaderChecksum is returned when reading a frame.
+	ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
+	// ErrInvalidBlockChecksum is returned when reading a frame.
+	ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
+	// ErrInvalidFrameChecksum is returned when reading a frame.
+	ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
+	// ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
+	ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
+	// ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
+	ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
+	// ErrOptionInvalidBlockSize is returned when an invalid block size is provided.
+	ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
+	// ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
+	ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
+	// ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
+	ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
+)
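Note: a minimal usage sketch (hypothetical main package) of the block API declared above. Sizing dst with CompressBlockBound guarantees CompressBlock succeeds, and UncompressBlock needs a destination sized for the original data:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := bytes.Repeat([]byte("lz4 block example "), 64)

	var c lz4.Compressor
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, dst)
	if err != nil {
		panic(err)
	}
	// n == 0 would mean the input is incompressible; repeated text is not.

	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Println(n, m == len(src), bytes.Equal(out[:m], src))
}
```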
diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go
new file mode 100644
index 0000000..57a44e7
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options.go
@@ -0,0 +1,242 @@
+package lz4
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
+
+type (
+	applier interface {
+		Apply(...Option) error
+		private()
+	}
+	// Option defines the parameters to set up an LZ4 Writer or Reader.
+	Option func(applier) error
+)
+
+// String returns a string representation of the option with its parameter(s).
+func (o Option) String() string {
+	return o(nil).Error()
+}
+
+// Default options.
+var (
+	DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
+	DefaultChecksumOption  = ChecksumOption(true)
+	DefaultConcurrency     = ConcurrencyOption(1)
+	defaultOnBlockDone     = OnBlockDoneOption(nil)
+)
+
+const (
+	Block64Kb BlockSize = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+)
+
+// BlockSize defines the size of the blocks to be compressed.
+type BlockSize uint32
+
+// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
+func BlockSizeOption(size BlockSize) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("BlockSizeOption(%s)", size)
+			return lz4errors.Error(s)
+		case *Writer:
+			size := uint32(size)
+			if !lz4block.IsValid(size) {
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+			}
+			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+			return nil
+		case *CompressingReader:
+			size := uint32(size)
+			if !lz4block.IsValid(size) {
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+			}
+			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// BlockChecksumOption enables or disables block checksum (default=false).
+func BlockChecksumOption(flag bool) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// ChecksumOption enables or disables the frame content checksum (default=true).
+func ChecksumOption(flag bool) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("ChecksumOption(%v)", flag)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// SizeOption sets the size of the original uncompressed data (default=0). It is useful
+// when the size of the whole uncompressed data stream is known up front.
+func SizeOption(size uint64) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("SizeOption(%d)", size)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.SizeSet(size > 0)
+			w.frame.Descriptor.ContentSize = size
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.SizeSet(size > 0)
+			w.frame.Descriptor.ContentSize = size
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// ConcurrencyOption sets the number of goroutines used for compression.
+// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
+func ConcurrencyOption(n int) Option {
+	if n <= 0 {
+		n = runtime.GOMAXPROCS(0)
+	}
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("ConcurrencyOption(%d)", n)
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.num = n
+			return nil
+		case *Reader:
+			rw.num = n
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// CompressionLevel defines the level of compression to use. The higher the level, the better (but slower) the compression.
+type CompressionLevel uint32
+
+const (
+	Fast   CompressionLevel = 0
+	Level1 CompressionLevel = 1 << (8 + iota)
+	Level2
+	Level3
+	Level4
+	Level5
+	Level6
+	Level7
+	Level8
+	Level9
+)
+
+// CompressionLevelOption defines the compression level (default=Fast).
+func CompressionLevelOption(level CompressionLevel) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("CompressionLevelOption(%s)", level)
+			return lz4errors.Error(s)
+		case *Writer:
+			switch level {
+			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+			default:
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+			}
+			w.level = lz4block.CompressionLevel(level)
+			return nil
+		case *CompressingReader:
+			switch level {
+			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+			default:
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+			}
+			w.level = lz4block.CompressionLevel(level)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+func onBlockDone(int) {}
+
+// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when it has been compressed,
+// for a Reader, it is when it has been uncompressed.
+func OnBlockDoneOption(handler func(size int)) Option {
+	if handler == nil {
+		handler = onBlockDone
+	}
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.handler = handler
+			return nil
+		case *Reader:
+			rw.handler = handler
+			return nil
+		case *CompressingReader:
+			rw.handler = handler
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// LegacyOption provides support for writing LZ4 frames in the legacy format.
+//
+// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
+//
+// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
+// the compressed stream is followed by the original (uncompressed) size of
+// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
+// This is also supported as a special case.
+func LegacyOption(legacy bool) Option {
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("LegacyOption(%v)", legacy)
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.legacy = legacy
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
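Note: a usage sketch (hypothetical main package and input file) of applying these options to a Reader. Options whose switch does not handle *Reader, such as BlockSizeOption, come back from Apply as ErrOptionNotApplicable:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	f, err := os.Open("data.lz4") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	if err := zr.Apply(
		lz4.ConcurrencyOption(4), // decode blocks with up to 4 goroutines
		lz4.OnBlockDoneOption(func(size int) { fmt.Println("block done:", size) }),
	); err != nil {
		panic(err)
	}
	if _, err := io.Copy(io.Discard, zr); err != nil {
		panic(err)
	}
}
```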
diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go
new file mode 100644
index 0000000..2de8149
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Block64Kb-65536]
+	_ = x[Block256Kb-262144]
+	_ = x[Block1Mb-1048576]
+	_ = x[Block4Mb-4194304]
+}
+
+const (
+	_BlockSize_name_0 = "Block64Kb"
+	_BlockSize_name_1 = "Block256Kb"
+	_BlockSize_name_2 = "Block1Mb"
+	_BlockSize_name_3 = "Block4Mb"
+)
+
+func (i BlockSize) String() string {
+	switch {
+	case i == 65536:
+		return _BlockSize_name_0
+	case i == 262144:
+		return _BlockSize_name_1
+	case i == 1048576:
+		return _BlockSize_name_2
+	case i == 4194304:
+		return _BlockSize_name_3
+	default:
+		return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Fast-0]
+	_ = x[Level1-512]
+	_ = x[Level2-1024]
+	_ = x[Level3-2048]
+	_ = x[Level4-4096]
+	_ = x[Level5-8192]
+	_ = x[Level6-16384]
+	_ = x[Level7-32768]
+	_ = x[Level8-65536]
+	_ = x[Level9-131072]
+}
+
+const (
+	_CompressionLevel_name_0 = "Fast"
+	_CompressionLevel_name_1 = "Level1"
+	_CompressionLevel_name_2 = "Level2"
+	_CompressionLevel_name_3 = "Level3"
+	_CompressionLevel_name_4 = "Level4"
+	_CompressionLevel_name_5 = "Level5"
+	_CompressionLevel_name_6 = "Level6"
+	_CompressionLevel_name_7 = "Level7"
+	_CompressionLevel_name_8 = "Level8"
+	_CompressionLevel_name_9 = "Level9"
+)
+
+func (i CompressionLevel) String() string {
+	switch {
+	case i == 0:
+		return _CompressionLevel_name_0
+	case i == 512:
+		return _CompressionLevel_name_1
+	case i == 1024:
+		return _CompressionLevel_name_2
+	case i == 2048:
+		return _CompressionLevel_name_3
+	case i == 4096:
+		return _CompressionLevel_name_4
+	case i == 8192:
+		return _CompressionLevel_name_5
+	case i == 16384:
+		return _CompressionLevel_name_6
+	case i == 32768:
+		return _CompressionLevel_name_7
+	case i == 65536:
+		return _CompressionLevel_name_8
+	case i == 131072:
+		return _CompressionLevel_name_9
+	default:
+		return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go
new file mode 100644
index 0000000..275daad
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/reader.go
@@ -0,0 +1,275 @@
+package lz4
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var readerStates = []aState{
+	noState:     newState,
+	errorState:  newState,
+	newState:    readState,
+	readState:   closedState,
+	closedState: newState,
+}
+
+// NewReader returns a new LZ4 frame decoder.
+func NewReader(r io.Reader) *Reader {
+	return newReader(r, false)
+}
+
+func newReader(r io.Reader, legacy bool) *Reader {
+	zr := &Reader{frame: lz4stream.NewFrame()}
+	zr.state.init(readerStates)
+	_ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
+	zr.Reset(r)
+	return zr
+}
+
+// Reader allows reading an LZ4 stream.
+type Reader struct {
+	state   _State
+	src     io.Reader        // source reader
+	num     int              // concurrency level
+	frame   *lz4stream.Frame // frame being read
+	data    []byte           // block buffer allocated in non concurrent mode
+	reads   chan []byte      // pending data
+	idx     int              // size of pending data
+	handler func(int)
+	cum     uint32
+	dict    []byte
+}
+
+func (*Reader) private() {}
+
+func (r *Reader) Apply(options ...Option) (err error) {
+	defer r.state.check(&err)
+	switch r.state.state {
+	case newState:
+	case errorState:
+		return r.state.err
+	default:
+		return lz4errors.ErrOptionClosedOrError
+	}
+	for _, o := range options {
+		if err = o(r); err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Size returns the size of the underlying uncompressed data, if set in the stream.
+func (r *Reader) Size() int {
+	switch r.state.state {
+	case readState, closedState:
+		if r.frame.Descriptor.Flags.Size() {
+			return int(r.frame.Descriptor.ContentSize)
+		}
+	}
+	return 0
+}
+
+func (r *Reader) isNotConcurrent() bool {
+	return r.num == 1
+}
+
+func (r *Reader) init() error {
+	err := r.frame.ParseHeaders(r.src)
+	if err != nil {
+		return err
+	}
+	if !r.frame.Descriptor.Flags.BlockIndependence() {
+		// We can't decompress dependent blocks concurrently.
+	// Instead of returning an error to the user, silently drop concurrency.
+		r.num = 1
+	}
+	data, err := r.frame.InitR(r.src, r.num)
+	if err != nil {
+		return err
+	}
+	r.reads = data
+	r.idx = 0
+	size := r.frame.Descriptor.Flags.BlockSizeIndex()
+	r.data = size.Get()
+	r.cum = 0
+	return nil
+}
+
+func (r *Reader) Read(buf []byte) (n int, err error) {
+	defer r.state.check(&err)
+	switch r.state.state {
+	case readState:
+	case closedState, errorState:
+		return 0, r.state.err
+	case newState:
+		// First initialization.
+		if err = r.init(); r.state.next(err) {
+			return
+		}
+	default:
+		return 0, r.state.fail()
+	}
+	for len(buf) > 0 {
+		var bn int
+		if r.idx == 0 {
+			if r.isNotConcurrent() {
+				bn, err = r.read(buf)
+			} else {
+				lz4block.Put(r.data)
+				r.data = <-r.reads
+				if len(r.data) == 0 {
+					// No uncompressed data: something went wrong or we are done.
+					err = r.frame.Blocks.ErrorR()
+				}
+			}
+			switch err {
+			case nil:
+			case io.EOF:
+				if er := r.frame.CloseR(r.src); er != nil {
+					err = er
+				}
+				lz4block.Put(r.data)
+				r.data = nil
+				return
+			default:
+				return
+			}
+		}
+		if bn == 0 {
+			// Fill buf with buffered data.
+			bn = copy(buf, r.data[r.idx:])
+			r.idx += bn
+			if r.idx == len(r.data) {
+				// All data read, get ready for the next Read.
+				r.idx = 0
+			}
+		}
+		buf = buf[bn:]
+		n += bn
+		r.handler(bn)
+	}
+	return
+}
+
+// read uncompresses the next block as follows:
+// - if buf has enough room, the block is uncompressed into it directly
+//   and the length of the used space is returned
+// - else, the uncompressed data is stored in r.data and 0 is returned
+func (r *Reader) read(buf []byte) (int, error) {
+	block := r.frame.Blocks.Block
+	_, err := block.Read(r.frame, r.src, r.cum)
+	if err != nil {
+		return 0, err
+	}
+	var direct bool
+	dst := r.data[:cap(r.data)]
+	if len(buf) >= len(dst) {
+		// Uncompress directly into buf.
+		direct = true
+		dst = buf
+	}
+	dst, err = block.Uncompress(r.frame, dst, r.dict, true)
+	if err != nil {
+		return 0, err
+	}
+	if !r.frame.Descriptor.Flags.BlockIndependence() {
+		if len(r.dict)+len(dst) > 128*1024 {
+			preserveSize := 64*1024 - len(dst)
+			if preserveSize < 0 {
+				preserveSize = 0
+			}
+			r.dict = r.dict[len(r.dict)-preserveSize:]
+		}
+		r.dict = append(r.dict, dst...)
+	}
+	r.cum += uint32(len(dst))
+	if direct {
+		return len(dst), nil
+	}
+	r.data = dst
+	return 0, nil
+}
+
+// Reset clears the state of the Reader r such that it is equivalent to its
+// initial state from NewReader, but instead reading from reader.
+// No access to reader is performed.
+func (r *Reader) Reset(reader io.Reader) {
+	if r.data != nil {
+		lz4block.Put(r.data)
+		r.data = nil
+	}
+	r.frame.Reset(r.num)
+	r.state.reset()
+	r.src = reader
+	r.reads = nil
+}
+
+// WriteTo efficiently uncompresses the data from the Reader's underlying source to w.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	switch r.state.state {
+	case closedState, errorState:
+		return 0, r.state.err
+	case newState:
+		if err = r.init(); r.state.next(err) {
+			return
+		}
+	default:
+		return 0, r.state.fail()
+	}
+	defer r.state.nextd(&err)
+
+	var data []byte
+	if r.isNotConcurrent() {
+		size := r.frame.Descriptor.Flags.BlockSizeIndex()
+		data = size.Get()
+		defer lz4block.Put(data)
+	}
+	for {
+		var bn int
+		var dst []byte
+		if r.isNotConcurrent() {
+			bn, err = r.read(data)
+			dst = data[:bn]
+		} else {
+			lz4block.Put(dst)
+			dst = <-r.reads
+			bn = len(dst)
+			if bn == 0 {
+				// No uncompressed data: something went wrong or we are done.
+				err = r.frame.Blocks.ErrorR()
+			}
+		}
+		switch err {
+		case nil:
+		case io.EOF:
+			err = r.frame.CloseR(r.src)
+			return
+		default:
+			return
+		}
+		r.handler(bn)
+		bn, err = w.Write(dst)
+		n += int64(bn)
+		if err != nil {
+			return
+		}
+	}
+}
+
+// ValidFrameHeader returns a bool indicating whether the given byte slice matches an LZ4 frame header.
+func ValidFrameHeader(in []byte) (bool, error) {
+	f := lz4stream.NewFrame()
+	err := f.ParseHeaders(bytes.NewReader(in))
+	if err == nil {
+		return true, nil
+	}
+	if err == lz4errors.ErrInvalidFrame {
+		return false, nil
+	}
+	return false, err
+}
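
A minimal decompression sketch using the Reader defined above; the input path is illustrative and error handling is abbreviated:

package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	in, err := os.Open("data.lz4") // illustrative path
	if err != nil {
		panic(err)
	}
	defer in.Close()

	// io.Copy uses the Reader's WriteTo method to stream the uncompressed data.
	zr := lz4.NewReader(in)
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		panic(err)
	}
}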
diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go
new file mode 100644
index 0000000..d94f04d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state.go
@@ -0,0 +1,75 @@
+package lz4
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
+
+const (
+	noState     aState = iota // uninitialized reader
+	errorState                // unrecoverable error encountered
+	newState                  // instantiated object
+	readState                 // reading data
+	writeState                // writing data
+	closedState               // all done
+)
+
+type (
+	aState uint8
+	_State struct {
+		states []aState
+		state  aState
+		err    error
+	}
+)
+
+func (s *_State) init(states []aState) {
+	s.states = states
+	s.state = states[0]
+}
+
+func (s *_State) reset() {
+	s.state = s.states[0]
+	s.err = nil
+}
+
+// next sets the state to the next one unless it is passed a non-nil error.
+// It returns whether or not it is in error.
+func (s *_State) next(err error) bool {
+	if err != nil {
+		s.err = fmt.Errorf("%s: %w", s.state, err)
+		s.state = errorState
+		return true
+	}
+	s.state = s.states[s.state]
+	return false
+}
+
+// nextd is like next but for defers.
+func (s *_State) nextd(errp *error) bool {
+	return errp != nil && s.next(*errp)
+}
+
+// check sets s in error if it is not already in error and the error is neither nil nor io.EOF.
+func (s *_State) check(errp *error) {
+	if s.state == errorState || errp == nil {
+		return
+	}
+	if err := *errp; err != nil {
+		s.err = fmt.Errorf("%w[%s]", err, s.state)
+		if !errors.Is(err, io.EOF) {
+			s.state = errorState
+		}
+	}
+}
+
+func (s *_State) fail() error {
+	s.state = errorState
+	s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
+	return s.err
+}
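
readerStates and writerStates are transition tables indexed by the current state, and next simply looks up the successor. A standalone sketch of the same table-driven pattern, using illustrative types that are not part of this package:

package main

import "fmt"

type phase uint8

const (
	idle phase = iota
	working
	finished
)

// successor[current] yields the next phase, mirroring how readerStates/writerStates are used.
var successor = []phase{
	idle:     working,
	working:  finished,
	finished: idle,
}

func main() {
	p := idle
	for i := 0; i < 4; i++ {
		fmt.Printf("%d -> %d\n", p, successor[p])
		p = successor[p]
	}
}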
diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go
new file mode 100644
index 0000000..75fb828
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[noState-0]
+	_ = x[errorState-1]
+	_ = x[newState-2]
+	_ = x[readState-3]
+	_ = x[writeState-4]
+	_ = x[closedState-5]
+}
+
+const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState"
+
+var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55}
+
+func (i aState) String() string {
+	if i >= aState(len(_aState_index)-1) {
+		return "aState(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _aState_name[_aState_index[i]:_aState_index[i+1]]
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go
new file mode 100644
index 0000000..4358ade
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/writer.go
@@ -0,0 +1,242 @@
+package lz4
+
+import (
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var writerStates = []aState{
+	noState:     newState,
+	newState:    writeState,
+	writeState:  closedState,
+	closedState: newState,
+	errorState:  newState,
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+func NewWriter(w io.Writer) *Writer {
+	zw := &Writer{frame: lz4stream.NewFrame()}
+	zw.state.init(writerStates)
+	_ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
+	zw.Reset(w)
+	return zw
+}
+
+// Writer allows writing an LZ4 stream.
+type Writer struct {
+	state   _State
+	src     io.Writer                 // destination writer
+	level   lz4block.CompressionLevel // how hard to try
+	num     int                       // concurrency level
+	frame   *lz4stream.Frame          // frame being built
+	data    []byte                    // pending data
+	idx     int                       // size of pending data
+	handler func(int)
+	legacy  bool
+}
+
+func (*Writer) private() {}
+
+func (w *Writer) Apply(options ...Option) (err error) {
+	defer w.state.check(&err)
+	switch w.state.state {
+	case newState:
+	case errorState:
+		return w.state.err
+	default:
+		return lz4errors.ErrOptionClosedOrError
+	}
+	w.Reset(w.src)
+	for _, o := range options {
+		if err = o(w); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (w *Writer) isNotConcurrent() bool {
+	return w.num == 1
+}
+
+// init sets up the Writer when in newState. It does not change the Writer state.
+func (w *Writer) init() error {
+	w.frame.InitW(w.src, w.num, w.legacy)
+	size := w.frame.Descriptor.Flags.BlockSizeIndex()
+	w.data = size.Get()
+	w.idx = 0
+	return w.frame.Descriptor.Write(w.frame, w.src)
+}
+
+func (w *Writer) Write(buf []byte) (n int, err error) {
+	defer w.state.check(&err)
+	switch w.state.state {
+	case writeState:
+	case closedState, errorState:
+		return 0, w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return 0, w.state.fail()
+	}
+
+	zn := len(w.data)
+	for len(buf) > 0 {
+		if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn {
+			// Avoid a copy as there is enough data for a block.
+			if err = w.write(buf[:zn], false); err != nil {
+				return
+			}
+			n += zn
+			buf = buf[zn:]
+			continue
+		}
+		// Accumulate the data to be compressed.
+		m := copy(w.data[w.idx:], buf)
+		n += m
+		w.idx += m
+		buf = buf[m:]
+
+		if w.idx < len(w.data) {
+			// Buffer not filled.
+			return
+		}
+
+		// Buffer full.
+		if err = w.write(w.data, true); err != nil {
+			return
+		}
+		if !w.isNotConcurrent() {
+			size := w.frame.Descriptor.Flags.BlockSizeIndex()
+			w.data = size.Get()
+		}
+		w.idx = 0
+	}
+	return
+}
+
+func (w *Writer) write(data []byte, safe bool) error {
+	if w.isNotConcurrent() {
+		block := w.frame.Blocks.Block
+		err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
+		w.handler(len(block.Data))
+		return err
+	}
+	c := make(chan *lz4stream.FrameDataBlock)
+	w.frame.Blocks.Blocks <- c
+	go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
+		b := lz4stream.NewFrameDataBlock(w.frame)
+		c <- b.Compress(w.frame, data, w.level)
+		<-c
+		w.handler(len(b.Data))
+		b.Close(w.frame)
+		if safe {
+			// safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed
+			lz4block.Put(data)
+		}
+	}(c, data, safe)
+
+	return nil
+}
+
+// Flush flushes any buffered data to the underlying writer immediately.
+func (w *Writer) Flush() (err error) {
+	switch w.state.state {
+	case writeState:
+	case errorState:
+		return w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return nil
+	}
+
+	if w.idx > 0 {
+		// Flush pending data, disable w.data freeing as it is done later on.
+		if err = w.write(w.data[:w.idx], false); err != nil {
+			return err
+		}
+		w.idx = 0
+	}
+	return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying writer
+// without closing it.
+func (w *Writer) Close() error {
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	err := w.frame.CloseW(w.src, w.num)
+	// It is now safe to free the buffer.
+	if w.data != nil {
+		lz4block.Put(w.data)
+		w.data = nil
+	}
+	return err
+}
+
+// Reset clears the state of the Writer w such that it is equivalent to its
+// initial state from NewWriter, but instead writing to writer.
+// Reset keeps the previous options unless overwritten by the supplied ones.
+// No access to writer is performed.
+//
+// w.Close must be called before Reset or pending data may be dropped.
+func (w *Writer) Reset(writer io.Writer) {
+	w.frame.Reset(w.num)
+	w.state.reset()
+	w.src = writer
+}
+
+// ReadFrom efficiently reads from r and compresses the data into the Writer destination.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	switch w.state.state {
+	case closedState, errorState:
+		return 0, w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return 0, w.state.fail()
+	}
+	defer w.state.check(&err)
+
+	size := w.frame.Descriptor.Flags.BlockSizeIndex()
+	var done bool
+	var rn int
+	data := size.Get()
+	if w.isNotConcurrent() {
+		// Keep the same buffer for the whole process.
+		defer lz4block.Put(data)
+	}
+	for !done {
+		rn, err = io.ReadFull(r, data)
+		switch err {
+		case nil:
+		case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+			done = true
+		default:
+			return
+		}
+		n += int64(rn)
+		err = w.write(data[:rn], true)
+		if err != nil {
+			return
+		}
+		w.handler(rn)
+		if !done && !w.isNotConcurrent() {
+			// The buffer will be returned automatically by goroutines (safe=true)
+			// so get a new one for the next round.
+			data = size.Get()
+		}
+	}
+	return
+}
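
A minimal compression sketch using the Writer defined above. The option constructors (ConcurrencyOption, CompressionLevelOption) are assumed from this package's option set; the output path is illustrative:

package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	out, err := os.Create("data.lz4") // illustrative path
	if err != nil {
		panic(err)
	}
	defer out.Close()

	zw := lz4.NewWriter(out)
	// Options must be applied before the first Write (assumed constructor names).
	if err := zw.Apply(lz4.ConcurrencyOption(4), lz4.CompressionLevelOption(lz4.Level5)); err != nil {
		panic(err)
	}
	// io.Copy uses the Writer's ReadFrom method.
	if _, err := io.Copy(zw, os.Stdin); err != nil {
		panic(err)
	}
	// Close flushes pending data and writes the end-of-frame marker; it does not close out.
	if err := zw.Close(); err != nil {
		panic(err)
	}
}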
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
deleted file mode 100644
index 6a60a9a..0000000
--- a/vendor/github.com/pierrec/lz4/writer.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package lz4
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-	"runtime"
-
-	"github.com/pierrec/lz4/internal/xxh32"
-)
-
-// zResult contains the results of compressing a block.
-type zResult struct {
-	size     uint32 // Block header
-	data     []byte // Compressed data
-	checksum uint32 // Data checksum
-}
-
-// Writer implements the LZ4 frame encoder.
-type Writer struct {
-	Header
-	// Handler called when a block has been successfully written out.
-	// It provides the number of bytes written.
-	OnBlockDone func(size int)
-
-	buf       [19]byte      // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
-	dst       io.Writer     // Destination.
-	checksum  xxh32.XXHZero // Frame checksum.
-	data      []byte        // Data to be compressed + buffer for compressed data.
-	idx       int           // Index into data.
-	hashtable [winSize]int  // Hash table used in CompressBlock().
-
-	// For concurrency.
-	c   chan chan zResult // Channel for block compression goroutines and writer goroutine.
-	err error             // Any error encountered while writing to the underlying destination.
-}
-
-// NewWriter returns a new LZ4 frame encoder.
-// No access to the underlying io.Writer is performed.
-// The supplied Header is checked at the first Write.
-// It is ok to change it before the first Write but then not until a Reset() is performed.
-func NewWriter(dst io.Writer) *Writer {
-	z := new(Writer)
-	z.Reset(dst)
-	return z
-}
-
-// WithConcurrency sets the number of concurrent go routines used for compression.
-// A negative value sets the concurrency to GOMAXPROCS.
-func (z *Writer) WithConcurrency(n int) *Writer {
-	switch {
-	case n == 0 || n == 1:
-		z.c = nil
-		return z
-	case n < 0:
-		n = runtime.GOMAXPROCS(0)
-	}
-	z.c = make(chan chan zResult, n)
-	// Writer goroutine managing concurrent block compression goroutines.
-	go func() {
-		// Process next block compression item.
-		for c := range z.c {
-			// Read the next compressed block result.
-			// Waiting here ensures that the blocks are output in the order they were sent.
-			// The incoming channel is always closed as it indicates to the caller that
-			// the block has been processed.
-			res := <-c
-			n := len(res.data)
-			if n == 0 {
-				// Notify the block compression routine that we are done with its result.
-				// This is used when a sentinel block is sent to terminate the compression.
-				close(c)
-				return
-			}
-			// Write the block.
-			if err := z.writeUint32(res.size); err != nil && z.err == nil {
-				z.err = err
-			}
-			if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
-				z.err = err
-			}
-			if z.BlockChecksum {
-				if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
-					z.err = err
-				}
-			}
-			if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
-				// It is now safe to release the buffer as no longer in use by any goroutine.
-				putBuffer(cap(res.data), res.data)
-			}
-			if h := z.OnBlockDone; h != nil {
-				h(n)
-			}
-			close(c)
-		}
-	}()
-	return z
-}
-
-// newBuffers instantiates new buffers which size matches the one in Header.
-// The returned buffers are for decompression and compression respectively.
-func (z *Writer) newBuffers() {
-	bSize := z.Header.BlockMaxSize
-	buf := getBuffer(bSize)
-	z.data = buf[:bSize] // Uncompressed buffer is the first half.
-}
-
-// freeBuffers puts the writer's buffers back to the pool.
-func (z *Writer) freeBuffers() {
-	// Put the buffer back into the pool, if any.
-	putBuffer(z.Header.BlockMaxSize, z.data)
-	z.data = nil
-}
-
-// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
-func (z *Writer) writeHeader() error {
-	// Default to 4Mb if BlockMaxSize is not set.
-	if z.Header.BlockMaxSize == 0 {
-		z.Header.BlockMaxSize = blockSize4M
-	}
-	// The only option that needs to be validated.
-	bSize := z.Header.BlockMaxSize
-	if !isValidBlockSize(z.Header.BlockMaxSize) {
-		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
-	}
-	// Allocate the compressed/uncompressed buffers.
-	// The compressed buffer cannot exceed the uncompressed one.
-	z.newBuffers()
-	z.idx = 0
-
-	// Size is optional.
-	buf := z.buf[:]
-
-	// Set the fixed size data: magic number, block max size and flags.
-	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
-	flg := byte(Version << 6)
-	flg |= 1 << 5 // No block dependency.
-	if z.Header.BlockChecksum {
-		flg |= 1 << 4
-	}
-	if z.Header.Size > 0 {
-		flg |= 1 << 3
-	}
-	if !z.Header.NoChecksum {
-		flg |= 1 << 2
-	}
-	buf[4] = flg
-	buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4
-
-	// Current buffer size: magic(4) + flags(1) + block max size (1).
-	n := 6
-	// Optional items.
-	if z.Header.Size > 0 {
-		binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
-		n += 8
-	}
-
-	// The header checksum includes the flags, block max size and optional Size.
-	buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
-	z.checksum.Reset()
-
-	// Header ready, write it out.
-	if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
-		return err
-	}
-	z.Header.done = true
-	if debugFlag {
-		debug("wrote header %v", z.Header)
-	}
-
-	return nil
-}
-
-// Write compresses data from the supplied buffer into the underlying io.Writer.
-// Write does not return until the data has been written.
-func (z *Writer) Write(buf []byte) (int, error) {
-	if !z.Header.done {
-		if err := z.writeHeader(); err != nil {
-			return 0, err
-		}
-	}
-	if debugFlag {
-		debug("input buffer len=%d index=%d", len(buf), z.idx)
-	}
-
-	zn := len(z.data)
-	var n int
-	for len(buf) > 0 {
-		if z.idx == 0 && len(buf) >= zn {
-			// Avoid a copy as there is enough data for a block.
-			if err := z.compressBlock(buf[:zn]); err != nil {
-				return n, err
-			}
-			n += zn
-			buf = buf[zn:]
-			continue
-		}
-		// Accumulate the data to be compressed.
-		m := copy(z.data[z.idx:], buf)
-		n += m
-		z.idx += m
-		buf = buf[m:]
-		if debugFlag {
-			debug("%d bytes copied to buf, current index %d", n, z.idx)
-		}
-
-		if z.idx < len(z.data) {
-			// Buffer not filled.
-			if debugFlag {
-				debug("need more data for compression")
-			}
-			return n, nil
-		}
-
-		// Buffer full.
-		if err := z.compressBlock(z.data); err != nil {
-			return n, err
-		}
-		z.idx = 0
-	}
-
-	return n, nil
-}
-
-// compressBlock compresses a block.
-func (z *Writer) compressBlock(data []byte) error {
-	if !z.NoChecksum {
-		_, _ = z.checksum.Write(data)
-	}
-
-	if z.c != nil {
-		c := make(chan zResult)
-		z.c <- c // Send now to guarantee order
-		go writerCompressBlock(c, z.Header, data)
-		return nil
-	}
-
-	zdata := z.data[z.Header.BlockMaxSize:cap(z.data)]
-	// The compressed block size cannot exceed the input's.
-	var zn int
-
-	if level := z.Header.CompressionLevel; level != 0 {
-		zn, _ = CompressBlockHC(data, zdata, level)
-	} else {
-		zn, _ = CompressBlock(data, zdata, z.hashtable[:])
-	}
-
-	var bLen uint32
-	if debugFlag {
-		debug("block compression %d => %d", len(data), zn)
-	}
-	if zn > 0 && zn < len(data) {
-		// Compressible and compressed size smaller than uncompressed: ok!
-		bLen = uint32(zn)
-		zdata = zdata[:zn]
-	} else {
-		// Uncompressed block.
-		bLen = uint32(len(data)) | compressedBlockFlag
-		zdata = data
-	}
-	if debugFlag {
-		debug("block compression to be written len=%d data len=%d", bLen, len(zdata))
-	}
-
-	// Write the block.
-	if err := z.writeUint32(bLen); err != nil {
-		return err
-	}
-	written, err := z.dst.Write(zdata)
-	if err != nil {
-		return err
-	}
-	if h := z.OnBlockDone; h != nil {
-		h(written)
-	}
-
-	if !z.BlockChecksum {
-		if debugFlag {
-			debug("current frame checksum %x", z.checksum.Sum32())
-		}
-		return nil
-	}
-	checksum := xxh32.ChecksumZero(zdata)
-	if debugFlag {
-		debug("block checksum %x", checksum)
-		defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }()
-	}
-	return z.writeUint32(checksum)
-}
-
-// Flush flushes any pending compressed data to the underlying writer.
-// Flush does not return until the data has been written.
-// If the underlying writer returns an error, Flush returns that error.
-func (z *Writer) Flush() error {
-	if debugFlag {
-		debug("flush with index %d", z.idx)
-	}
-	if z.idx == 0 {
-		return nil
-	}
-
-	data := z.data[:z.idx]
-	z.idx = 0
-	if z.c == nil {
-		return z.compressBlock(data)
-	}
-	if !z.NoChecksum {
-		_, _ = z.checksum.Write(data)
-	}
-	c := make(chan zResult)
-	z.c <- c
-	writerCompressBlock(c, z.Header, data)
-	return nil
-}
-
-func (z *Writer) close() error {
-	if z.c == nil {
-		return nil
-	}
-	// Send a sentinel block (no data to compress) to terminate the writer main goroutine.
-	c := make(chan zResult)
-	z.c <- c
-	c <- zResult{}
-	// Wait for the main goroutine to complete.
-	<-c
-	// At this point the main goroutine has shut down or is about to return.
-	z.c = nil
-	return z.err
-}
-
-// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
-func (z *Writer) Close() error {
-	if !z.Header.done {
-		if err := z.writeHeader(); err != nil {
-			return err
-		}
-	}
-	if err := z.Flush(); err != nil {
-		return err
-	}
-	if err := z.close(); err != nil {
-		return err
-	}
-	z.freeBuffers()
-
-	if debugFlag {
-		debug("writing last empty block")
-	}
-	if err := z.writeUint32(0); err != nil {
-		return err
-	}
-	if z.NoChecksum {
-		return nil
-	}
-	checksum := z.checksum.Sum32()
-	if debugFlag {
-		debug("stream checksum %x", checksum)
-	}
-	return z.writeUint32(checksum)
-}
-
-// Reset clears the state of the Writer z such that it is equivalent to its
-// initial state from NewWriter, but instead writing to w.
-// No access to the underlying io.Writer is performed.
-func (z *Writer) Reset(w io.Writer) {
-	n := cap(z.c)
-	_ = z.close()
-	z.freeBuffers()
-	z.Header.Reset()
-	z.dst = w
-	z.checksum.Reset()
-	z.idx = 0
-	z.err = nil
-	// reset hashtable to ensure deterministic output.
-	for i := range z.hashtable {
-		z.hashtable[i] = 0
-	}
-	z.WithConcurrency(n)
-}
-
-// writeUint32 writes a uint32 to the underlying writer.
-func (z *Writer) writeUint32(x uint32) error {
-	buf := z.buf[:4]
-	binary.LittleEndian.PutUint32(buf, x)
-	_, err := z.dst.Write(buf)
-	return err
-}
-
-// writerCompressBlock compresses data into a pooled buffer and writes its result
-// out to the input channel.
-func writerCompressBlock(c chan zResult, header Header, data []byte) {
-	zdata := getBuffer(header.BlockMaxSize)
-	// The compressed block size cannot exceed the input's.
-	var zn int
-	if level := header.CompressionLevel; level != 0 {
-		zn, _ = CompressBlockHC(data, zdata, level)
-	} else {
-		var hashTable [winSize]int
-		zn, _ = CompressBlock(data, zdata, hashTable[:])
-	}
-	var res zResult
-	if zn > 0 && zn < len(data) {
-		res.size = uint32(zn)
-		res.data = zdata[:zn]
-	} else {
-		res.size = uint32(len(data)) | compressedBlockFlag
-		res.data = data
-	}
-	if header.BlockChecksum {
-		res.checksum = xxh32.ChecksumZero(res.data)
-	}
-	c <- res
-}
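
The removed v1 writer above was configured through exported Header fields and the WithConcurrency builder; the v4 package vendored by this change expresses the same settings as functional options passed to Apply. A rough equivalence sketch, assuming the usual v4 option constructors:

package lz4migration

import (
	"io"

	"github.com/pierrec/lz4/v4"
)

// newConfiguredWriter shows v4 equivalents of the removed v1 configuration
// (Header.BlockMaxSize, Header.CompressionLevel, WithConcurrency); the helper
// name and option constructors are assumptions for illustration.
func newConfiguredWriter(dst io.Writer) (*lz4.Writer, error) {
	zw := lz4.NewWriter(dst)
	err := zw.Apply(
		lz4.BlockSizeOption(lz4.Block4Mb),      // v1: Header.BlockMaxSize = 4 << 20
		lz4.CompressionLevelOption(lz4.Level9), // v1: Header.CompressionLevel = 9
		lz4.ConcurrencyOption(4),               // v1: WithConcurrency(4)
	)
	return zw, err
}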
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 0000000..b9cc55a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,18 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 0000000..3460f03
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000..c67ff1b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
new file mode 100644
index 0000000..450189f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+	path, version, sum := "unknown", "unknown", "unknown"
+	if bi, ok := debug.ReadBuildInfo(); ok {
+		path = bi.Main.Path
+		version = bi.Main.Version
+		sum = bi.Main.Sum
+	}
+	c := &selfCollector{MustNewConstMetric(
+		NewDesc(
+			"go_build_info",
+			"Build information about the main Go module.",
+			nil, Labels{"path": path, "version": version, "checksum": sum},
+		),
+		GaugeValue, 1)}
+	c.init(c.self)
+	return c
+}
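
NewBuildInfoCollector above is kept only for compatibility; new code registers the collectors-package replacement. A short sketch:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	// Non-deprecated replacement from the collectors sub-package.
	reg.MustRegister(collectors.NewBuildInfoCollector())

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // go_build_info
	}
}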
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000..cf05079
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,128 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+	// Describe sends the super-set of all possible descriptors of metrics
+	// collected by this Collector to the provided channel and returns once
+	// the last descriptor has been sent. The sent descriptors fulfill the
+	// consistency and uniqueness requirements described in the Desc
+	// documentation.
+	//
+	// It is valid if one and the same Collector sends duplicate
+	// descriptors. Those duplicates are simply ignored. However, two
+	// different Collectors must not send duplicate descriptors.
+	//
+	// Sending no descriptor at all marks the Collector as “unchecked”,
+	// i.e. no checks will be performed at registration time, and the
+	// Collector may yield any Metric it sees fit in its Collect method.
+	//
+	// This method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector. It may be called concurrently and
+	// therefore must be implemented in a concurrency safe way.
+	//
+	// If a Collector encounters an error while executing this method, it
+	// must send an invalid descriptor (created with NewInvalidDesc) to
+	// signal the error to the registry.
+	Describe(chan<- *Desc)
+	// Collect is called by the Prometheus registry when collecting
+	// metrics. The implementation sends each collected metric via the
+	// provided channel and returns once the last metric has been sent. The
+	// descriptor of each sent metric is one of those returned by Describe
+	// (unless the Collector is unchecked, see above). Returned metrics that
+	// share the same descriptor must differ in their variable label
+	// values.
+	//
+	// This method may be called concurrently and must therefore be
+	// implemented in a concurrency safe way. Blocking occurs at the expense
+	// of total performance of rendering all registered metrics. Ideally,
+	// Collector implementations support concurrent readers.
+	Collect(chan<- Metric)
+}
+
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+//	func (c customCollector) Describe(ch chan<- *Desc) {
+//		DescribeByCollect(c, ch)
+//	}
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collector (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+	metrics := make(chan Metric)
+	go func() {
+		c.Collect(metrics)
+		close(metrics)
+	}()
+	for m := range metrics {
+		descs <- m.Desc()
+	}
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+	self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+	c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+	ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+	ch <- c.self
+}
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+	Metric
+	Collector
+}
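
A minimal custom Collector built on DescribeByCollect, as the doc comment above suggests; the metric and type names are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// queueCollector is a hypothetical collector that creates its metric on the fly.
type queueCollector struct {
	desc *prometheus.Desc
}

func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
	// Safe here because the set of collected metrics never changes.
	prometheus.DescribeByCollect(c, ch)
}

func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(queueCollector{
		desc: prometheus.NewDesc("queue_depth", "Current queue depth.", nil, nil),
	})

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(mfs)) // 1
}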
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
new file mode 100644
index 0000000..9a71a15
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// CollectorFunc is a convenient way to implement a Prometheus Collector
+// without interface boilerplate.
+// This implementation is based on the DescribeByCollect method;
+// familiarize yourself with it before using.
+type CollectorFunc func(chan<- Metric)
+
+// Collect calls the defined CollectorFunc function with the provided Metric channel.
+func (f CollectorFunc) Collect(ch chan<- Metric) {
+	f(ch)
+}
+
+// Describe sends the descriptor information using DescribeByCollect
+func (f CollectorFunc) Describe(ch chan<- *Desc) {
+	DescribeByCollect(f, ch)
+}
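
The same kind of one-off metric can be exposed with CollectorFunc and no named type; a minimal sketch:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("service_up", "Illustrative liveness gauge.", nil, nil)

	// CollectorFunc supplies Collect directly; Describe falls back to DescribeByCollect.
	c := prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1)
	})

	prometheus.MustRegister(c)
}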
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000..4ce84e7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,358 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+	Metric
+	Collector
+
+	// Inc increments the counter by 1. Use Add to increment it by arbitrary
+	// non-negative values.
+	Inc()
+	// Add adds the given value to the counter. It panics if the value is <
+	// 0.
+	Add(float64)
+}
+
+// ExemplarAdder is implemented by Counters that offer the option of adding a
+// value to the Counter together with an exemplar. Its AddWithExemplar method
+// works like the Add method of the Counter interface but also replaces the
+// currently saved exemplar (if any) with a new one, created from the provided
+// value, the current time as timestamp, and the provided labels. Empty Labels
+// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
+// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
+// of the provided labels are invalid, or if the provided labels contain more
+// than 128 runes in total.
+type ExemplarAdder interface {
+	AddWithExemplar(value float64, exemplar Labels)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+	CounterOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation also implements ExemplarAdder. It is safe to
+// perform the corresponding type assertion.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
+func NewCounter(opts CounterOpts) Counter {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	)
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
+	result.init(result) // Init self-collection.
+	result.createdTs = timestamppb.New(opts.now())
+	return result
+}
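
A short usage sketch for NewCounter, including the ExemplarAdder assertion mentioned above; the metric name and exemplar labels are illustrative:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_processed_total",
		Help: "Total number of processed jobs.",
	})
	prometheus.MustRegister(c)

	c.Inc()    // fast path, tracked atomically as an integer
	c.Add(2.5) // non-integer values take the float path

	// The returned implementation also supports exemplars.
	if ea, ok := c.(prometheus.ExemplarAdder); ok {
		ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
	}
}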
+
+type counter struct {
+	// valBits contains the bits of the represented float64 value, while
+	// valInt stores values that are exact integers. Both have to go first
+	// in the struct to guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+	valInt  uint64
+
+	selfCollector
+	desc *Desc
+
+	createdTs  *timestamppb.Timestamp
+	labelPairs []*dto.LabelPair
+	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+}
+
+func (c *counter) Desc() *Desc {
+	return c.desc
+}
+
+func (c *counter) Add(v float64) {
+	if v < 0 {
+		panic(errors.New("counter cannot decrease in value"))
+	}
+
+	ival := uint64(v)
+	if float64(ival) == v {
+		atomic.AddUint64(&c.valInt, ival)
+		return
+	}
+
+	for {
+		oldBits := atomic.LoadUint64(&c.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (c *counter) AddWithExemplar(v float64, e Labels) {
+	c.Add(v)
+	c.updateExemplar(v, e)
+}
+
+func (c *counter) Inc() {
+	atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) get() float64 {
+	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+	ival := atomic.LoadUint64(&c.valInt)
+	return fval + float64(ival)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+	// Read the Exemplar first and the value second. This is to avoid a race condition
+	// where users see an exemplar for a not-yet-existing observation.
+	var exemplar *dto.Exemplar
+	if e := c.exemplar.Load(); e != nil {
+		exemplar = e.(*dto.Exemplar)
+	}
+	val := c.get()
+	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
+}
+
+func (c *counter) updateExemplar(v float64, l Labels) {
+	if l == nil {
+		return
+	}
+	e, err := newExemplar(v, c.now(), l)
+	if err != nil {
+		panic(err)
+	}
+	c.exemplar.Store(e)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+type CounterVec struct {
+	*MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+	return V2.NewCounterVec(CounterVecOpts{
+		CounterOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+	desc := V2.NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		opts.VariableLabels,
+		opts.ConstLabels,
+	)
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	return &CounterVec{
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
+			}
+			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
+			result.init(result) // Init self-collection.
+			result.createdTs = timestamppb.New(opts.now())
+			return result
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
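+
+// As an illustrative sketch (the vector and label names below are hypothetical,
+// not part of this file), the two access styles compare as follows:
+//
+//	requests := prometheus.NewCounterVec(
+//		prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
+//		[]string{"method", "code"},
+//	)
+//	if c, err := requests.GetMetricWithLabelValues("GET", "200"); err == nil {
+//		c.Inc()
+//	}
+//	// Same series, selected by a Labels map instead of ordered values.
+//	requests.With(prometheus.Labels{"method": "GET", "code": "200"}).Inc()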
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
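+
+// A brief currying sketch (names are hypothetical, not part of this file):
+// fixing the "method" label up front so call sites only supply "code".
+//
+//	requests := prometheus.NewCounterVec(
+//		prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
+//		[]string{"method", "code"},
+//	)
+//	getRequests := requests.MustCurryWith(prometheus.Labels{"method": "GET"})
+//	// Updates the same series as requests.WithLabelValues("GET", "200").
+//	getRequests.WithLabelValues("200").Inc()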
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+	Metric
+	Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+//
+// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), CounterValue, function)
+}
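+
+// A minimal usage sketch (the expvar.Int here is hypothetical and assumed to be
+// maintained elsewhere, only ever increasing): exposing an externally tracked
+// monotonic value as a Counter.
+//
+//	var bytesSent expvar.Int // updated by the application, never decreased
+//
+//	c := prometheus.NewCounterFunc(prometheus.CounterOpts{
+//		Name: "bytes_sent_total",
+//		Help: "Total number of bytes sent.",
+//	}, func() float64 {
+//		return float64(bytesSent.Value())
+//	})
+//	prometheus.MustRegister(c)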
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..2331b8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,211 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/cespare/xxhash/v2"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+)
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+	// fqName has been built from Namespace, Subsystem, and Name.
+	fqName string
+	// help provides some helpful information about this metric.
+	help string
+	// constLabelPairs contains precalculated DTO label pairs based on
+	// the constant labels.
+	constLabelPairs []*dto.LabelPair
+	// variableLabels contains names of labels and normalization function for
+	// which the metric maintains variable values.
+	variableLabels *compiledLabels
+	// id is a hash of the values of the ConstLabels and fqName. This
+	// must be unique among all registered descriptors and can therefore be
+	// used as an identifier of the descriptor.
+	id uint64
+	// dimHash is a hash of the label names (preset and variable) and the
+	// Help string. Each Desc with the same fqName must have the same
+	// dimHash.
+	dimHash uint64
+	// err is an error that occurred during construction. It is reported on
+	// registration time.
+	err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
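+
+// A short sketch of creating a Desc and using it with a constant metric in a
+// custom Collector (the metric name and labels are hypothetical):
+//
+//	desc := prometheus.NewDesc(
+//		"worker_pool_size",
+//		"Current size of the worker pool.",
+//		[]string{"pool"},                  // variable labels
+//		prometheus.Labels{"zone": "eu-1"}, // const labels
+//	)
+//	// In Collect, emit a throw-away metric for the current value.
+//	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 42, "default")
+//	_ = m // typically sent on the channel passed to Collect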
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
+	d := &Desc{
+		fqName:         fqName,
+		help:           help,
+		variableLabels: variableLabels.compile(),
+	}
+	//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+	if !model.NameValidationScheme.IsValidMetricName(fqName) {
+		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+		return d
+	}
+	// labelValues contains the label values of const labels (in order of
+	// their sorted label names) plus the fqName (at position 0).
+	labelValues := make([]string, 1, len(constLabels)+1)
+	labelValues[0] = fqName
+	labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
+	labelNameSet := map[string]struct{}{}
+	// First add only the const label names and sort them...
+	for labelName := range constLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+			return d
+		}
+		labelNames = append(labelNames, labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	sort.Strings(labelNames)
+	// ... so that we can now add const label values in the order of their names.
+	for _, labelName := range labelNames {
+		labelValues = append(labelValues, constLabels[labelName])
+	}
+	// Validate the const label values. They can't have a wrong cardinality, so
+	// use len(labelValues) as expectedNumberOfValues.
+	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+		d.err = err
+		return d
+	}
+	// Now add the variable label names, but prefix them with something that
+	// cannot be in a regular label name. That prevents matching the label
+	// dimension with a different mix between preset and variable labels.
+	for _, label := range d.variableLabels.names {
+		if !checkLabelName(label) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
+			return d
+		}
+		labelNames = append(labelNames, "$"+label)
+		labelNameSet[label] = struct{}{}
+	}
+	if len(labelNames) != len(labelNameSet) {
+		d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
+		return d
+	}
+
+	xxh := xxhash.New()
+	for _, val := range labelValues {
+		xxh.WriteString(val)
+		xxh.Write(separatorByteSlice)
+	}
+	d.id = xxh.Sum64()
+	// Sort labelNames so that order doesn't matter for the hash.
+	sort.Strings(labelNames)
+	// Now hash together (in this order) the help string and the sorted
+	// label names.
+	xxh.Reset()
+	xxh.WriteString(help)
+	xxh.Write(separatorByteSlice)
+	for _, labelName := range labelNames {
+		xxh.WriteString(labelName)
+		xxh.Write(separatorByteSlice)
+	}
+	d.dimHash = xxh.Sum64()
+
+	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+	for n, v := range constLabels {
+		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(v),
+		})
+	}
+	sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
+	return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+	return &Desc{
+		err: err,
+	}
+}
+
+func (d *Desc) String() string {
+	lpStrings := make([]string, 0, len(d.constLabelPairs))
+	for _, lp := range d.constLabelPairs {
+		lpStrings = append(
+			lpStrings,
+			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+		)
+	}
+	vlStrings := []string{}
+	if d.variableLabels != nil {
+		vlStrings = make([]string, 0, len(d.variableLabels.names))
+		for _, vl := range d.variableLabels.names {
+			if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+				vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+			} else {
+				vlStrings = append(vlStrings, vl)
+			}
+		}
+	}
+	return fmt.Sprintf(
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
+		d.fqName,
+		d.help,
+		strings.Join(lpStrings, ","),
+		strings.Join(vlStrings, ","),
+	)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000..962608f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,210 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow you to expose the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// # A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+//	package main
+//
+//	import (
+//		"log"
+//		"net/http"
+//
+//		"github.com/prometheus/client_golang/prometheus"
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
+//	)
+//
+//	type metrics struct {
+//		cpuTemp  prometheus.Gauge
+//		hdFailures *prometheus.CounterVec
+//	}
+//
+//	func NewMetrics(reg prometheus.Registerer) *metrics {
+//		m := &metrics{
+//			cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+//				Name: "cpu_temperature_celsius",
+//				Help: "Current temperature of the CPU.",
+//			}),
+//			hdFailures: prometheus.NewCounterVec(
+//				prometheus.CounterOpts{
+//					Name: "hd_errors_total",
+//					Help: "Number of hard-disk errors.",
+//				},
+//				[]string{"device"},
+//			),
+//		}
+//		reg.MustRegister(m.cpuTemp)
+//		reg.MustRegister(m.hdFailures)
+//		return m
+//	}
+//
+//	func main() {
+//		// Create a non-global registry.
+//		reg := prometheus.NewRegistry()
+//
+//		// Create new metrics and register them using the custom registry.
+//		m := NewMetrics(reg)
+//		// Set values for the new created metrics.
+//		m.cpuTemp.Set(65.3)
+//		m.hdFailures.With(prometheus.Labels{"device": "/dev/sda"}).Inc()
+//
+//		// Expose metrics and custom registry via an HTTP server
+//		// using the HandleFor function. "/metrics" is the usual endpoint for that.
+//		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+//		log.Fatal(http.ListenAndServe(":8080", nil))
+//	}
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
+//
+// # Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
+// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
+//
+// # Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Your own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). NewConstMetric is used
+// for all metric types with just a float64 as their value: Counter, Gauge, and
+// a special “type” called Untyped. Use the latter if you are not sure if the
+// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
+// happens in the Collect method. The Describe method has to return separate
+// Desc instances, representative of the “throw-away” metrics to be created
+// later.  NewDesc comes in handy to create those Desc instances. Alternatively,
+// you could return no Desc at all, which will mark the Collector “unchecked”.
+// No checks are performed at registration time, but metric consistency will
+// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
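+// As a compact, purely illustrative sketch (the type, metric name, and value
+// are made up), a custom Collector mirroring an externally maintained number
+// could look like this:
+//
+//	type queueCollector struct {
+//		queueLengthDesc *prometheus.Desc
+//	}
+//
+//	func newQueueCollector() queueCollector {
+//		return queueCollector{queueLengthDesc: prometheus.NewDesc(
+//			"queue_length", "Current number of queued items.", nil, nil,
+//		)}
+//	}
+//
+//	func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
+//		ch <- c.queueLengthDesc
+//	}
+//
+//	func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
+//		// Read the current value from the external system (hard-coded here)
+//		// and emit it as a throw-away constant metric.
+//		ch <- prometheus.MustNewConstMetric(c.queueLengthDesc, prometheus.GaugeValue, 7)
+//	}
+//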
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// # Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways.  You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
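+//
+// A common pattern when handling the error from Register is to reuse an
+// already registered Collector instead of failing. A sketch (the counter is
+// only illustrative, and the standard errors and log packages are assumed):
+//
+//	c := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "events_total",
+//		Help: "Total number of events.",
+//	})
+//	if err := prometheus.Register(c); err != nil {
+//		are := prometheus.AlreadyRegisteredError{}
+//		if errors.As(err, &are) {
+//			// Use the Collector that was registered first.
+//			c = are.ExistingCollector.(prometheus.Counter)
+//		} else {
+//			log.Fatal(err)
+//		}
+//	}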
+//
+// # HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+//
+// # Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// # Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// # Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 0000000..de5a856
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,86 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewExpvarCollector instead.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
+		exports: exports,
+	}
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels.names))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
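+		// processValue walks the decoded expvar JSON recursively: map keys at
+		// nesting depth i become the value of the i-th variable label, and any
+		// float64 or bool leaf reached once all labels are filled is emitted
+		// as an untyped constant metric.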
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 0000000..3d383a7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
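+
+// An illustrative sketch of how these helpers are typically combined (the
+// label values and the use of model.SeparatorByte as a delimiter are only
+// examples):
+//
+//	h := hashNew()
+//	for _, v := range []string{"GET", "200"} {
+//		h = hashAdd(h, v)
+//		h = hashAddByte(h, model.SeparatorByte)
+//	}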
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000..dd2eac9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,311 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+	Metric
+	Collector
+
+	// Set sets the Gauge to an arbitrary value.
+	Set(float64)
+	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+	// values.
+	Inc()
+	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+	// values.
+	Dec()
+	// Add adds the given value to the Gauge. (The value can be negative,
+	// resulting in a decrease of the Gauge.)
+	Add(float64)
+	// Sub subtracts the given value from the Gauge. (The value can be
+	// negative, resulting in an increase of the Gauge.)
+	Sub(float64)
+
+	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+	SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+	GaugeOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
+func NewGauge(opts GaugeOpts) Gauge {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	)
+	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+	result.init(result) // Init self-collection.
+	return result
+}
+
+type gauge struct {
+	// valBits contains the bits of the represented float64 value. It has
+	// to go first in the struct to guarantee alignment for atomic
+	// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+
+	selfCollector
+
+	desc       *Desc
+	labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+	return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+	g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+	g.Add(1)
+}
+
+func (g *gauge) Dec() {
+	g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
+	for {
+		oldBits := atomic.LoadUint64(&g.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (g *gauge) Sub(val float64) {
+	g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+	return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+	*MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+	return V2.NewGaugeVec(GaugeVecOpts{
+		GaugeOpts:      opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+	desc := V2.NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		opts.VariableLabels,
+		opts.ConstLabels,
+	)
+	return &GaugeVec{
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
+			}
+			result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
+			result.init(result) // Init self-collection.
+			return result
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+	g, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+	g, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+	vec, err := v.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &GaugeVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+	Metric
+	Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. Therefore, it must be safe to call the provided function
+// concurrently.
+//
+// NewGaugeFunc is a good way to create an “info” style metric with a constant
+// value of 1. Example:
+// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), GaugeValue, function)
+}
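+
+// A small sketch of such an "info" style gauge (the metric name and label
+// value are hypothetical):
+//
+//	prometheus.MustRegister(prometheus.NewGaugeFunc(
+//		prometheus.GaugeOpts{
+//			Name:        "build_info",
+//			Help:        "Build information.",
+//			ConstLabels: prometheus.Labels{"version": "1.2.3"},
+//		},
+//		func() float64 { return 1 },
+//	))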
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
new file mode 100644
index 0000000..614fd61
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "os"
+
+func getPIDFn() func() (int, error) {
+	pid := os.Getpid()
+	return func() (int, error) {
+		return pid, nil
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
new file mode 100644
index 0000000..eaf8059
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+func getPIDFn() func() (int, error) {
+	return func() (int, error) {
+		return 1, nil
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000..520cbd7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,274 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"runtime"
+	"runtime/debug"
+	"time"
+)
+
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17, similar (and better) statistics are provided by runtime/metrics, so
+// while the eval closures still operate on a runtime.MemStats struct, on Go 1.17+
+// that struct is populated using runtime/metrics. These defaults cannot be altered.
+func goRuntimeMemStats() memStatsMetrics {
+	return memStatsMetrics{
+		{
+			desc: NewDesc(
+				memstatNamespace("alloc_bytes"),
+				"Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("alloc_bytes_total"),
+				"Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("sys_bytes"),
+				"Number of bytes obtained from system. Equals to /memory/classes/total:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mallocs_total"),
+				// TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
+				"Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("frees_total"),
+				"Total number of heap objects freed. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_alloc_bytes"),
+				"Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_sys_bytes"),
+				"Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_idle_bytes"),
+				"Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_inuse_bytes"),
+				"Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_released_bytes"),
+				"Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_objects"),
+				"Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("stack_inuse_bytes"),
+				"Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("stack_sys_bytes"),
+				"Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mspan_inuse_bytes"),
+				"Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mspan_sys_bytes"),
+				"Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mcache_inuse_bytes"),
+				"Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mcache_sys_bytes"),
+				"Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("buck_hash_sys_bytes"),
+				"Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("gc_sys_bytes"),
+				"Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("other_sys_bytes"),
+				"Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("next_gc_bytes"),
+				"Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+			valType: GaugeValue,
+		},
+	}
+}
+
+type baseGoCollector struct {
+	goroutinesDesc *Desc
+	threadsDesc    *Desc
+	gcDesc         *Desc
+	gcLastTimeDesc *Desc
+	goInfoDesc     *Desc
+}
+
+func newBaseGoCollector() baseGoCollector {
+	return baseGoCollector{
+		goroutinesDesc: NewDesc(
+			"go_goroutines",
+			"Number of goroutines that currently exist.",
+			nil, nil),
+		threadsDesc: NewDesc(
+			"go_threads",
+			"Number of OS threads created.",
+			nil, nil),
+		gcDesc: NewDesc(
+			"go_gc_duration_seconds",
+			"A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
+			nil, nil),
+		gcLastTimeDesc: NewDesc(
+			"go_memstats_last_gc_time_seconds",
+			"Number of seconds since 1970 of last garbage collection.",
+			nil, nil),
+		goInfoDesc: NewDesc(
+			"go_info",
+			"Information about the Go environment.",
+			nil, Labels{"version": runtime.Version()}),
+	}
+}
+
+// Describe returns all descriptions of the collector.
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
+	ch <- c.goroutinesDesc
+	ch <- c.threadsDesc
+	ch <- c.gcDesc
+	ch <- c.gcLastTimeDesc
+	ch <- c.goInfoDesc
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
+	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+
+	n := getRuntimeNumThreads()
+	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
+
+	var stats debug.GCStats
+	stats.PauseQuantiles = make([]time.Duration, 5)
+	debug.ReadGCStats(&stats)
+
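+	// ReadGCStats fills PauseQuantiles with len(PauseQuantiles) quantiles of
+	// the GC pause distribution, here 5: minimum, 25%, 50%, 75%, and maximum.
+	// Map them onto the 0, 0.25, 0.5, 0.75, and 1.0 quantiles of the summary.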
+	quantiles := make(map[float64]float64)
+	for idx, pq := range stats.PauseQuantiles[1:] {
+		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+	}
+	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+	ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
+	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+}
+
+func memstatNamespace(s string) string {
+	return "go_memstats_" + s
+}
+
+// memStatsMetrics provide description, evaluator, runtime/metrics name, and
+// value type for memstat metrics.
+type memStatsMetrics []struct {
+	desc    *Desc
+	eval    func(*runtime.MemStats) float64
+	valType ValueType
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
new file mode 100644
index 0000000..897a6e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -0,0 +1,122 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package prometheus
+
+import (
+	"runtime"
+	"sync"
+	"time"
+)
+
+type goCollector struct {
+	base baseGoCollector
+
+	// ms... are memstats related.
+	msLast          *runtime.MemStats // Previously collected memstats.
+	msLastTimestamp time.Time
+	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
+	msMetrics       memStatsMetrics
+	msRead          func(*runtime.MemStats) // For mocking in tests.
+	msMaxWait       time.Duration           // Wait time for fresh memstats.
+	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+	msMetrics := goRuntimeMemStats()
+	msMetrics = append(msMetrics, struct {
+		desc    *Desc
+		eval    func(*runtime.MemStats) float64
+		valType ValueType
+	}{
+		// This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+		desc: NewDesc(
+			memstatNamespace("gc_cpu_fraction"),
+			"The fraction of this program's available CPU time used by the GC since the program started.",
+			nil, nil,
+		),
+		eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+		valType: GaugeValue,
+	})
+	return &goCollector{
+		base:      newBaseGoCollector(),
+		msLast:    &runtime.MemStats{},
+		msRead:    runtime.ReadMemStats,
+		msMaxWait: time.Second,
+		msMaxAge:  5 * time.Minute,
+		msMetrics: msMetrics,
+	}
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	c.base.Describe(ch)
+	for _, i := range c.msMetrics {
+		ch <- i.desc
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+	var (
+		ms   = &runtime.MemStats{}
+		done = make(chan struct{})
+	)
+	// Start reading memstats first as it might take a while.
+	go func() {
+		c.msRead(ms)
+		c.msMtx.Lock()
+		c.msLast = ms
+		c.msLastTimestamp = time.Now()
+		c.msMtx.Unlock()
+		close(done)
+	}()
+
+	// Collect base non-memory metrics.
+	c.base.Collect(ch)
+
+	timer := time.NewTimer(c.msMaxWait)
+	select {
+	case <-done: // Our own ReadMemStats succeeded in time. Use it.
+		timer.Stop() // Important for high collection frequencies to not pile up timers.
+		c.msCollect(ch, ms)
+		return
+	case <-timer.C: // Time out, use last memstats if possible. Continue below.
+	}
+	c.msMtx.Lock()
+	if time.Since(c.msLastTimestamp) < c.msMaxAge {
+		// Last memstats are recent enough. Collect from them under the lock.
+		c.msCollect(ch, c.msLast)
+		c.msMtx.Unlock()
+		return
+	}
+	// If we are here, the last memstats are too old or don't exist. We have
+	// to wait until our own ReadMemStats finally completes. For that to
+	// happen, we have to release the lock.
+	c.msMtx.Unlock()
+	<-done
+	c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+	for _, i := range c.msMetrics {
+		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
new file mode 100644
index 0000000..6b86847
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -0,0 +1,574 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package prometheus
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"runtime/metrics"
+	"strings"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+)
+
+const (
+	// constants for strings referenced more than once.
+	goGCHeapTinyAllocsObjects               = "/gc/heap/tiny/allocs:objects"
+	goGCHeapAllocsObjects                   = "/gc/heap/allocs:objects"
+	goGCHeapFreesObjects                    = "/gc/heap/frees:objects"
+	goGCHeapFreesBytes                      = "/gc/heap/frees:bytes"
+	goGCHeapAllocsBytes                     = "/gc/heap/allocs:bytes"
+	goGCHeapObjects                         = "/gc/heap/objects:objects"
+	goGCHeapGoalBytes                       = "/gc/heap/goal:bytes"
+	goMemoryClassesTotalBytes               = "/memory/classes/total:bytes"
+	goMemoryClassesHeapObjectsBytes         = "/memory/classes/heap/objects:bytes"
+	goMemoryClassesHeapUnusedBytes          = "/memory/classes/heap/unused:bytes"
+	goMemoryClassesHeapReleasedBytes        = "/memory/classes/heap/released:bytes"
+	goMemoryClassesHeapFreeBytes            = "/memory/classes/heap/free:bytes"
+	goMemoryClassesHeapStacksBytes          = "/memory/classes/heap/stacks:bytes"
+	goMemoryClassesOSStacksBytes            = "/memory/classes/os-stacks:bytes"
+	goMemoryClassesMetadataMSpanInuseBytes  = "/memory/classes/metadata/mspan/inuse:bytes"
+	goMemoryClassesMetadataMSPanFreeBytes   = "/memory/classes/metadata/mspan/free:bytes"
+	goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+	goMemoryClassesMetadataMCacheFreeBytes  = "/memory/classes/metadata/mcache/free:bytes"
+	goMemoryClassesProfilingBucketsBytes    = "/memory/classes/profiling/buckets:bytes"
+	goMemoryClassesMetadataOtherBytes       = "/memory/classes/metadata/other:bytes"
+	goMemoryClassesOtherBytes               = "/memory/classes/other:bytes"
+)
+
+// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate the MemStats-like metrics produced by goRuntimeMemStats.
+var rmNamesForMemStatsMetrics = []string{
+	goGCHeapTinyAllocsObjects,
+	goGCHeapAllocsObjects,
+	goGCHeapFreesObjects,
+	goGCHeapAllocsBytes,
+	goGCHeapObjects,
+	goGCHeapGoalBytes,
+	goMemoryClassesTotalBytes,
+	goMemoryClassesHeapObjectsBytes,
+	goMemoryClassesHeapUnusedBytes,
+	goMemoryClassesHeapReleasedBytes,
+	goMemoryClassesHeapFreeBytes,
+	goMemoryClassesHeapStacksBytes,
+	goMemoryClassesOSStacksBytes,
+	goMemoryClassesMetadataMSpanInuseBytes,
+	goMemoryClassesMetadataMSPanFreeBytes,
+	goMemoryClassesMetadataMCacheInuseBytes,
+	goMemoryClassesMetadataMCacheFreeBytes,
+	goMemoryClassesProfilingBucketsBytes,
+	goMemoryClassesMetadataOtherBytes,
+	goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+	ret := make([]metrics.Description, 0, len(lookup))
+	for _, rm := range metrics.All() {
+		for _, m := range lookup {
+			if m == rm.Name {
+				ret = append(ret, rm)
+			}
+		}
+	}
+	return ret
+}
+
+type goCollector struct {
+	base baseGoCollector
+
+	// mu protects updates to all fields ensuring a consistent
+	// snapshot is always produced by Collect.
+	mu sync.Mutex
+
+	// sampleBuf contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+	sampleBuf []metrics.Sample
+	// sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+	sampleMap map[string]*metrics.Sample
+
+	// rmExposedMetrics represents all runtime/metrics package metrics
+	// that were configured to be exposed.
+	rmExposedMetrics     []collectorMetric
+	rmExactSumMapForHist map[string]string
+
+	// With Go 1.17, the runtime/metrics package was introduced.
+	// From that point on, the metric names exposed by this collector
+	// could be generated directly from runtime/metrics names. However,
+	// these differ from the old names used for the same values.
+	//
+	// This field exists to export the same values under the old names
+	// as well.
+	msMetrics        memStatsMetrics
+	msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+	metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+	var descs []rmMetricDesc
+	for _, d := range metrics.All() {
+		var (
+			deny = true
+			desc rmMetricDesc
+		)
+
+		for _, r := range rules {
+			if !r.Matcher.MatchString(d.Name) {
+				continue
+			}
+			deny = r.Deny
+		}
+		if deny {
+			continue
+		}
+
+		desc.Description = d
+		descs = append(descs, desc)
+	}
+	return descs
+}
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+	return internal.GoCollectorOptions{
+		RuntimeMetricSumForHist: map[string]string{
+			"/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+			"/gc/heap/frees-by-size:bytes":  goGCHeapFreesBytes,
+		},
+		RuntimeMetricRules: []internal.GoCollectorRule{
+			// Recommended metrics we want by default from runtime/metrics.
+			{Matcher: internal.GoCollectorDefaultRuntimeMetrics},
+		},
+	}
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+	opt := defaultGoCollectorOptions()
+	for _, o := range opts {
+		o(&opt)
+	}
+
+	exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
+
+	// Collect all histogram samples so that we can get their buckets.
+	// The API guarantees that the buckets are always fixed for the lifetime
+	// of the process.
+	var histograms []metrics.Sample
+	for _, d := range exposedDescriptions {
+		if d.Kind == metrics.KindFloat64Histogram {
+			histograms = append(histograms, metrics.Sample{Name: d.Name})
+		}
+	}
+
+	if len(histograms) > 0 {
+		metrics.Read(histograms)
+	}
+
+	bucketsMap := make(map[string][]float64)
+	for i := range histograms {
+		bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+	}
+
+	// Generate a collector for each exposed runtime/metrics metric.
+	metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+	// sampleBuf is used for reading from runtime/metrics.
+	// Allocate capacity for the largest possible case up front so the slice is
+	// never reallocated and the pointers stored in sampleMap stay stable.
+	sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+	sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+	for _, d := range exposedDescriptions {
+		namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
+		if !ok {
+			// Just ignore this metric; we can't do anything with it here.
+			// If a user decides to use the latest version of Go, we don't want
+			// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
+			continue
+		}
+		help := attachOriginalName(d.Description.Description, d.Name)
+
+		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+		var m collectorMetric
+		if d.Kind == metrics.KindFloat64Histogram {
+			_, hasSum := opt.RuntimeMetricSumForHist[d.Name]
+			unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+			m = newBatchHistogram(
+				NewDesc(
+					BuildFQName(namespace, subsystem, name),
+					help,
+					nil,
+					nil,
+				),
+				internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+				hasSum,
+			)
+		} else if d.Cumulative {
+			m = NewCounter(CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      help,
+			},
+			)
+		} else {
+			m = NewGauge(GaugeOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      help,
+			})
+		}
+		metricSet = append(metricSet, m)
+	}
+
+	// Add exact sum metrics to sampleBuf if not added before.
+	for _, h := range histograms {
+		sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+		if !ok {
+			continue
+		}
+
+		if _, ok := sampleMap[sumMetric]; ok {
+			continue
+		}
+		sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+		sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+	}
+
+	var (
+		msMetrics      memStatsMetrics
+		msDescriptions []metrics.Description
+	)
+
+	if !opt.DisableMemStatsLikeMetrics {
+		msMetrics = goRuntimeMemStats()
+		msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+		// If the metric was not already added to sampleBuf above, add it now.
+		for _, mdDesc := range msDescriptions {
+			if _, ok := sampleMap[mdDesc.Name]; ok {
+				continue
+			}
+			sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+			sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+		}
+	}
+
+	return &goCollector{
+		base:                 newBaseGoCollector(),
+		sampleBuf:            sampleBuf,
+		sampleMap:            sampleMap,
+		rmExposedMetrics:     metricSet,
+		rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+		msMetrics:            msMetrics,
+		msMetricsEnabled:     !opt.DisableMemStatsLikeMetrics,
+	}
+}
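+
+// A minimal registration sketch, assuming the collectors package helpers the
+// deprecation notice above points at (names like WithGoCollectorRuntimeMetrics
+// and MetricsAll are from that package):
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(collectors.NewGoCollector(
+//		collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll),
+//	))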
+
+func attachOriginalName(desc, origName string) string {
+	return fmt.Sprintf("%s Sourced from %s.", desc, origName)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	c.base.Describe(ch)
+	for _, i := range c.msMetrics {
+		ch <- i.desc
+	}
+	for _, m := range c.rmExposedMetrics {
+		ch <- m.Desc()
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+	// Collect base non-memory metrics.
+	c.base.Collect(ch)
+
+	if len(c.sampleBuf) == 0 {
+		return
+	}
+
+	// Collect must be thread-safe, so prevent concurrent use of
+	// sampleBuf elements. Just read into sampleBuf but write all the data
+	// we get into our Metrics or MemStats.
+	//
+	// This lock also ensures that the Metrics we send out are all from
+	// the same updates, ensuring their mutual consistency insofar as
+	// is guaranteed by the runtime/metrics package.
+	//
+	// N.B. This locking is heavy-handed, but Collect is expected to be called
+	// relatively infrequently. Also the core operation here, metrics.Read,
+	// is fast (O(tens of microseconds)) so contention should certainly be
+	// low, though channel operations and any allocations may add to that.
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// Populate runtime/metrics sample buffer.
+	metrics.Read(c.sampleBuf)
+
+	// Collect all the runtime/metrics the user chose to expose from sampleBuf (if any).
+	for i, metric := range c.rmExposedMetrics {
+		// We created samples for exposed metrics first in order, so indexes match.
+		sample := c.sampleBuf[i]
+
+		// N.B. switch on concrete type because it's significantly more efficient
+		// than checking for the Counter and Gauge interface implementations. In
+		// this case, we control all the types here.
+		switch m := metric.(type) {
+		case *counter:
+			// Guard against decreases. This should never happen, but a failure
+			// to do so will result in a panic, which is a harsh consequence for
+			// a metrics collection bug.
+			v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+			if v1 > v0 {
+				m.Add(unwrapScalarRMValue(sample.Value) - m.get())
+			}
+			m.Collect(ch)
+		case *gauge:
+			m.Set(unwrapScalarRMValue(sample.Value))
+			m.Collect(ch)
+		case *batchHistogram:
+			m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+			m.Collect(ch)
+		default:
+			panic("unexpected metric type")
+		}
+	}
+
+	if c.msMetricsEnabled {
+		// ms is a dummy MemStats that we populate ourselves so that the old
+		// MemStats-style metrics can be derived from it when memstats
+		// collection is enabled.
+		var ms runtime.MemStats
+		memStatsFromRM(&ms, c.sampleMap)
+		for _, i := range c.msMetrics {
+			ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+		}
+	}
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+	switch v.Kind() {
+	case metrics.KindUint64:
+		return float64(v.Uint64())
+	case metrics.KindFloat64:
+		return v.Float64()
+	case metrics.KindBad:
+		// Unsupported metric.
+		//
+		// This should never happen because we always populate our metric
+		// set from the runtime/metrics package.
+		panic("unexpected bad kind metric")
+	default:
+		// Unsupported metric kind.
+		//
+		// This should never happen because we check for this during initialization
+		// and flag and filter metrics whose kinds we don't understand.
+		panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
+	}
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum, or 0 if no
+// exact sum is available.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+	sumName, ok := c.rmExactSumMapForHist[rmName]
+	if !ok {
+		return 0
+	}
+	s, ok := c.sampleMap[sumName]
+	if !ok {
+		return 0
+	}
+	return unwrapScalarRMValue(s.Value)
+}
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+	lookupOrZero := func(name string) uint64 {
+		if s, ok := rm[name]; ok {
+			return s.Value.Uint64()
+		}
+		return 0
+	}
+
+	// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+	// The reason is that MemStats couldn't be extended at the time,
+	// but there was a desire to have Mallocs at least be a little more representative,
+	// while having Mallocs - Frees still represent a live object count.
+	// Unfortunately, MemStats doesn't actually export a large allocation count,
+	// so it's impossible to pull this number out directly.
+	tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+	ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+	ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
+
+	ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+	ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
+	ms.Lookups = 0 // Already always zero.
+	ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
+	ms.Alloc = ms.HeapAlloc
+	ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
+	ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
+	ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
+	ms.HeapSys = ms.HeapInuse + ms.HeapIdle
+	ms.HeapObjects = lookupOrZero(goGCHeapObjects)
+	ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
+	ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
+	ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
+	ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
+	ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
+	ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
+	ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
+	ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
+	ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
+	ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
+
+	// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
+	// and often misleading due to the fact that it's an average over the lifetime
+	// of the process.
+	// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+	// for more details.
+	ms.GCCPUFraction = 0
+}
+
+// batchHistogram is a mutable histogram that is updated
+// in batches.
+type batchHistogram struct {
+	selfCollector
+
+	// Static fields updated only once.
+	desc   *Desc
+	hasSum bool
+
+	// Because this histogram operates in batches, it just uses a
+	// single mutex for everything. Updates are always serialized,
+	// but Write calls may operate concurrently with updates.
+	// Contention between these two sources should be rare.
+	mu      sync.Mutex
+	buckets []float64 // Inclusive lower bounds, like runtime/metrics.
+	counts  []uint64
+	sum     float64 // Used if hasSum is true.
+}
+
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+	// We need to remove -Inf values; runtime/metrics keeps them around,
+	// but a -Inf bucket is not allowed for Prometheus histograms.
+	if buckets[0] == math.Inf(-1) {
+		buckets = buckets[1:]
+	}
+	h := &batchHistogram{
+		desc:    desc,
+		buckets: buckets,
+		// Because buckets follows runtime/metrics conventions, there's
+		// 1 more value in the buckets list than there are buckets represented,
+		// because in runtime/metrics, the bucket values represent *boundaries*,
+		// and non-Inf boundaries are inclusive lower bounds for that bucket.
+		counts: make([]uint64, len(buckets)-1),
+		hasSum: hasSum,
+	}
+	h.init(h)
+	return h
+}
+
+// update updates the batchHistogram from a runtime/metrics histogram.
+//
+// sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
+func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
+	counts, buckets := his.Counts, his.Buckets
+
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	// Clear buckets.
+	for i := range h.counts {
+		h.counts[i] = 0
+	}
+	// Copy and reduce buckets.
+	var j int
+	for i, count := range counts {
+		h.counts[j] += count
+		if buckets[i+1] == h.buckets[j+1] {
+			j++
+		}
+	}
+	if h.hasSum {
+		h.sum = sum
+	}
+}
+
+func (h *batchHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *batchHistogram) Write(out *dto.Metric) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	sum := float64(0)
+	if h.hasSum {
+		sum = h.sum
+	}
+	dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
+	totalCount := uint64(0)
+	for i, count := range h.counts {
+		totalCount += count
+		if !h.hasSum {
+			if count != 0 {
+				// N.B. This computed sum is an underestimate.
+				sum += h.buckets[i] * float64(count)
+			}
+		}
+
+		// Skip the +Inf bucket, but only for the bucket list.
+		// It must still count for sum and totalCount.
+		if math.IsInf(h.buckets[i+1], 1) {
+			break
+		}
+		// Float64Histogram's upper bound is exclusive, so make it inclusive
+		// by taking the next float64 value down.
+		upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
+		dtoBuckets = append(dtoBuckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(totalCount),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+	out.Histogram = &dto.Histogram{
+		Bucket:      dtoBuckets,
+		SampleCount: proto.Uint64(totalCount),
+		SampleSum:   proto.Float64(sum),
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000..c453b75
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,2056 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+	nativeHistogramSchemaMaximum = 8
+	nativeHistogramSchemaMinimum = -4
+)
+
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+	// Schema "0":
+	{0.5},
+	// Schema 1:
+	{0.5, 0.7071067811865475},
+	// Schema 2:
+	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+	// Schema 3:
+	{
+		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+	},
+	// Schema 4:
+	{
+		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+	},
+	// Schema 5:
+	{
+		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+	},
+	// Schema 6:
+	{
+		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+	},
+	// Schema 7:
+	{
+		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+	},
+	// Schema 8:
+	{
+		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+	},
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to decide whether we are fine with
+// that, or whether we would prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// 	// Populate nativeHistogramBounds.
+// 	numBuckets := 1
+// 	for i := range nativeHistogramBounds {
+// 		bounds := []float64{0.5}
+// 		factor := math.Exp2(math.Exp2(float64(-i)))
+// 		for j := 0; j < numBuckets-1; j++ {
+// 			var bound float64
+// 			if (j+1)%2 == 0 {
+// 				// Use previously calculated value for increased precision.
+// 				bound = nativeHistogramBounds[i-1][j/2+1]
+// 			} else {
+// 				bound = bounds[j] * factor
+// 			}
+// 			bounds = append(bounds, bound)
+// 		}
+// 		numBuckets *= 2
+// 		nativeHistogramBounds[i] = bounds
+// 	}
+// }
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile PromQL function.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
+//
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the histogram. Observations are
+	// usually positive or zero. Negative observations are accepted but
+	// prevent current versions of Prometheus from properly detecting
+	// counter resets in the sum of observations. (The experimental Native
+	// Histograms handle negative observations properly.) See
+	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+	// for details.
+	Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in histograms", bucketLabel,
+)
+
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+	if count < 1 {
+		panic("LinearBuckets needs a positive count")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start += width
+	}
+	return buckets
+}
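+
+// For illustration: LinearBuckets(0, 5, 4) returns []float64{0, 5, 10, 15}.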
+
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBuckets needs a positive count")
+	}
+	if start <= 0 {
+		panic("ExponentialBuckets needs a positive start value")
+	}
+	if factor <= 1 {
+		panic("ExponentialBuckets needs a factor greater than 1")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start *= factor
+	}
+	return buckets
+}
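+
+// For illustration: ExponentialBuckets(1, 2, 4) returns []float64{1, 2, 4, 8}.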
+
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'minBucket' and the highest bucket is 'maxBucket'. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'minBucket' is 0 or
+// negative.
+func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBucketsRange count needs a positive count")
+	}
+	if minBucket <= 0 {
+		panic("ExponentialBucketsRange min needs to be greater than 0")
+	}
+
+	// Formula for exponential buckets:
+	// maxBucket = minBucket * growthFactor^(count-1)
+
+	// We know minBucket, maxBucket, and count. Solve for growthFactor.
+	growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))
+
+	// Now that we know growthFactor, solve for each bucket.
+	buckets := make([]float64, count)
+	for i := 1; i <= count; i++ {
+		buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
+	}
+	return buckets
+}
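+
+// For illustration: ExponentialBucketsRange(1, 100, 3) solves for a
+// growthFactor of 10 and returns []float64{1, 10, 100}.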
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type HistogramOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Histogram (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Histogram must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Histogram.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Buckets defines the buckets into which observations are counted. Each
+	// element in the slice is the upper inclusive bound of a bucket. The
+	// values must be sorted in strictly increasing order. There is no need
+	// to add a highest bucket with +Inf bound, it will be added
+	// implicitly. If Buckets is left as nil or set to a slice of length
+	// zero, it is replaced by default buckets. The default buckets are
+	// DefBuckets if no buckets for a native histogram (see below) are used,
+	// otherwise the default is no buckets. (In other words, if you want to
+	// use both regular buckets and buckets for a native histogram, you have
+	// to define the regular buckets here explicitly.)
+	Buckets []float64
+
+	// If NativeHistogramBucketFactor is greater than one, so-called sparse
+	// buckets are used (in addition to the regular buckets, if defined
+	// above). A Histogram with sparse buckets will be ingested as a Native
+	// Histogram by a Prometheus server with that feature enabled (requires
+	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
+	// the whole float64 range (with the exception of the “zero” bucket, see
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
+	// the width of the bucket grows by a constant
+	// factor. NativeHistogramBucketFactor provides an upper bound for this
+	// factor (exception see below). The smaller
+	// NativeHistogramBucketFactor, the more buckets will be used and thus
+	// the more costly the histogram will become. A generally good trade-off
+	// between cost and accuracy is a value of 1.1 (each bucket is at most
+	// 10% wider than the previous one), which will result in each power of
+	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
+	// and 2, same as between 2 and 4, and 4 and 8, etc.).
+	//
+	// Details about the actually used factor: The factor is calculated as
+	// 2^(2^-n), where n is an integer number between (and including) -4 and
+	// 8. n is chosen so that the resulting factor is the largest that is
+	// still smaller or equal to NativeHistogramBucketFactor. Note that the
+	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+	// it is larger than the provided NativeHistogramBucketFactor.
+	//
+	// NOTE: Native Histograms are still an experimental feature. Their
+	// behavior might still change without a major version
+	// bump. Subsequently, all NativeHistogram... options here might still
+	// change their behavior or name (or might completely disappear) without
+	// a major version bump.
+	NativeHistogramBucketFactor float64
+	// All observations with an absolute value less than or equal to
+	// NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
+	// For best results, this should be close to a bucket boundary. This is
+	// usually the case if picking a power of two. If
+	// NativeHistogramZeroThreshold is left at zero,
+	// DefNativeHistogramZeroThreshold is used as the threshold. To
+	// configure a zero bucket with an actual threshold of zero (i.e. only
+	// observations of precisely zero will go into the zero bucket), set
+	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+	// constant (or any negative float value).
+	NativeHistogramZeroThreshold float64
+
+	// The next three fields define a strategy to limit the number of
+	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+	// at zero, the number of buckets is not limited. (Note that this might
+	// lead to unbounded memory consumption if the values observed by the
+	// Histogram are sufficiently wide-spread. In particular, this could be
+	// used as a DoS attack vector. Where the observed values depend on
+	// external inputs, it is highly recommended to set a
+	// NativeHistogramMaxBucketNumber.) Once the set
+	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+	// enacted:
+	//  - First, if the last reset (or the creation) of the histogram is at
+	//    least NativeHistogramMinResetDuration ago, then the whole
+	//    histogram is reset to its initial state (including regular
+	//    buckets).
+	//  - If less time has passed, or if NativeHistogramMinResetDuration is
+	//    zero, no reset is performed. Instead, the zero threshold is
+	//    increased sufficiently to reduce the number of buckets to or below
+	//    NativeHistogramMaxBucketNumber, but not to more than
+	//    NativeHistogramMaxZeroThreshold. Thus, if
+	//    NativeHistogramMaxZeroThreshold is already at or below the current
+	//    zero threshold, nothing happens at this step.
+	//  - After that, if the number of buckets still exceeds
+	//    NativeHistogramMaxBucketNumber, the resolution of the histogram is
+	//    reduced by doubling the width of the sparse buckets (up to a
+	//    growth factor between one bucket to the next of 2^(2^4) = 65536,
+	//    see above).
+	//  - Any increased zero threshold or reduced resolution is reset back
+	//    to their original values once NativeHistogramMinResetDuration has
+	//    passed (since the last reset or the creation of the histogram).
+	NativeHistogramMaxBucketNumber  uint32
+	NativeHistogramMinResetDuration time.Duration
+	NativeHistogramMaxZeroThreshold float64
+
+	// NativeHistogramMaxExemplars limits the number of exemplars
+	// that are kept in memory for each native histogram. If you leave it at
+	// zero, a default value of 10 is used. If no exemplars should be kept specifically
+	// for native histograms, set it to a negative value. (Scrapers can
+	// still use the exemplars exposed for classic buckets, which are managed
+	// independently.)
+	NativeHistogramMaxExemplars int
+	// NativeHistogramExemplarTTL is only checked once
+	// NativeHistogramMaxExemplars is exceeded. In that case, the
+	// oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+	// Otherwise, the older exemplar in the pair of exemplars that are closest
+	// together (on an exponential scale) is removed.
+	// If NativeHistogramExemplarTTL is left at its zero value, a default value of
+	// 5m is used. To always delete the oldest exemplar, set it to a negative value.
+	NativeHistogramExemplarTTL time.Duration
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
+}
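+
+// A minimal configuration sketch for a native histogram; the field values
+// below are illustrative, not defaults:
+//
+//	opts := prometheus.HistogramOpts{
+//		Name:                            "rpc_latency_seconds",
+//		Help:                            "RPC latency in seconds.",
+//		NativeHistogramBucketFactor:     1.1,
+//		NativeHistogramMaxBucketNumber:  100,
+//		NativeHistogramMinResetDuration: time.Hour,
+//	}
+//	_ = prometheus.NewHistogram(opts)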
+
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+	HistogramOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+//
+// The returned implementation also implements ExemplarObserver. It is safe to
+// perform the corresponding type assertion. Exemplars are tracked separately
+// for each bucket.
+func NewHistogram(opts HistogramOpts) Histogram {
+	return newHistogram(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
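+
+// A minimal usage sketch, assuming the default registry; the metric name
+// below is illustrative:
+//
+//	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
+//		Name:    "http_request_duration_seconds",
+//		Help:    "Request latency in seconds.",
+//		Buckets: prometheus.DefBuckets,
+//	})
+//	prometheus.MustRegister(reqDur)
+//	reqDur.Observe(0.42)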
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+	if len(desc.variableLabels.names) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
+	}
+
+	for _, n := range desc.variableLabels.names {
+		if n == bucketLabel {
+			panic(errBucketLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == bucketLabel {
+			panic(errBucketLabelNotAllowed)
+		}
+	}
+
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	if opts.afterFunc == nil {
+		opts.afterFunc = time.AfterFunc
+	}
+
+	h := &histogram{
+		desc:                            desc,
+		upperBounds:                     opts.Buckets,
+		labelPairs:                      MakeLabelPairs(desc, labelValues),
+		nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
+		nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+		lastResetTime:                   opts.now(),
+		now:                             opts.now,
+		afterFunc:                       opts.afterFunc,
+	}
+	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+		h.upperBounds = DefBuckets
+	}
+	if opts.NativeHistogramBucketFactor <= 1 {
+		h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+	} else {
+		switch {
+		case opts.NativeHistogramZeroThreshold > 0:
+			h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+		case opts.NativeHistogramZeroThreshold == 0:
+			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+		h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
+	}
+	for i, upperBound := range h.upperBounds {
+		if i < len(h.upperBounds)-1 {
+			if upperBound >= h.upperBounds[i+1] {
+				panic(fmt.Errorf(
+					"histogram buckets must be in increasing order: %f >= %f",
+					upperBound, h.upperBounds[i+1],
+				))
+			}
+		} else {
+			if math.IsInf(upperBound, +1) {
+				// The +Inf bucket is implicit. Remove it here.
+				h.upperBounds = h.upperBounds[:i]
+			}
+		}
+	}
+	// Finally we know the final length of h.upperBounds and can make buckets
+	// for both counts as well as exemplars:
+	h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+	h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
+	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
+
+	h.init(h) // Init self-collection.
+	return h
+}
+
+type histogramCounts struct {
+	// Order in this struct matters for the alignment required by atomic
+	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations.
+	sumBits uint64
+	count   uint64
+
+	// nativeHistogramZeroBucket counts all (positive and negative)
+	// observations in the zero bucket (with an absolute value less than or
+	// equal to the current threshold; see the next field).
+	nativeHistogramZeroBucket uint64
+	// nativeHistogramZeroThresholdBits is the bit pattern of the current
+	// threshold for the zero bucket. It's initially equal to
+	// nativeHistogramZeroThreshold but may change according to the bucket
+	// count limitation strategy.
+	nativeHistogramZeroThresholdBits uint64
+	// nativeHistogramSchema may change over time according to the bucket
+	// count limitation strategy and therefore has to be saved here.
+	nativeHistogramSchema int32
+	// Number of (positive and negative) sparse buckets.
+	nativeHistogramBucketsNumber uint32
+
+	// Regular buckets.
+	buckets []uint64
+
+	// The sparse buckets for native histograms are implemented with a
+	// sync.Map for now. A dedicated data structure will likely be more
+	// efficient. There are separate maps for negative and positive
+	// observations. The map's value is an *int64, counting observations in
+	// that bucket. (Note that we don't use uint64 as an int64 won't
+	// overflow in practice, and working with signed numbers from the
+	// beginning simplifies the handling of deltas.) The map's key is the
+	// index of the bucket according to the used
+	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
+	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be updated,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+	if bucket < len(hc.buckets) {
+		atomic.AddUint64(&hc.buckets[bucket], 1)
+	}
+	atomicAddFloat(&hc.sumBits, v)
+	if doSparse && !math.IsNaN(v) {
+		var (
+			key                  int
+			schema               = atomic.LoadInt32(&hc.nativeHistogramSchema)
+			zeroThreshold        = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+			bucketCreated, isInf bool
+		)
+		if math.IsInf(v, 0) {
+			// Pretend v is MaxFloat64 but later increment key by one.
+			if math.IsInf(v, +1) {
+				v = math.MaxFloat64
+			} else {
+				v = -math.MaxFloat64
+			}
+			isInf = true
+		}
+		frac, exp := math.Frexp(math.Abs(v))
+		if schema > 0 {
+			bounds := nativeHistogramBounds[schema]
+			key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+		} else {
+			key = exp
+			if frac == 0.5 {
+				key--
+			}
+			offset := (1 << -schema) - 1
+			key = (key + offset) >> -schema
+		}
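+		// A worked example for schema 0: v = 3 has frac = 0.75 and exp = 2,
+		// so key = 2, i.e. the bucket (2, 4]; v = 1 has frac = 0.5 and
+		// exp = 1, so key becomes 0, i.e. the bucket (0.5, 1].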
+		if isInf {
+			key++
+		}
+		switch {
+		case v > zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+		case v < -zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+		default:
+			atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+		}
+		if bucketCreated {
+			atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hc.count, 1)
+}
+
+type histogram struct {
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the counts field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finishes by incrementing the count field in the respective
+	// histogramCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the histogram) swap the hot and cold counts under the
+	// mtx lock. A cooldown is awaited (while locked) by comparing the number
+	// of observations with the initiation count. Once they match, the last
+	// observation on the now-cold counts has completed. All cold fields must
+	// be merged into the new hot counts before releasing mtx.
+	//
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc *Desc
+
+	// Only used in the Write method and for sparse bucket management.
+	mtx sync.Mutex
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*histogramCounts
+
+	upperBounds                     []float64
+	labelPairs                      []*dto.LabelPair
+	exemplars                       []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+	nativeHistogramSchema           int32          // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+	nativeHistogramZeroThreshold    float64        // The initial zero threshold.
+	nativeHistogramMaxZeroThreshold float64
+	nativeHistogramMaxBuckets       uint32
+	nativeHistogramMinResetDuration time.Duration
+	// lastResetTime is protected by mtx. It is also used as created timestamp.
+	lastResetTime time.Time
+	// resetScheduled is protected by mtx. It is true if a reset is
+	// scheduled for a later time (when nativeHistogramMinResetDuration has
+	// passed).
+	resetScheduled  bool
+	nativeExemplars nativeExemplars
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
+}
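To make the countAndHotIdx packing described above concrete, here is a small illustration (not part of the vendored file; the numbers are made up): with hot index 1 and five started observations the field holds (1<<63)+5. Observe adds 1 and leaves the hot bit alone; Write adds 1<<63, which flips the hot bit because the addition wraps modulo 2^64, while the lower 63 bits still read 5.

	n := uint64(1)<<63 + 5   // hot index 1, 5 observations started
	hotIdx := n >> 63        // 1
	count := n & (1<<63 - 1) // 5
	n += 1 << 63             // a Write swap: hot index wraps to 0, count stays 5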
+
+func (h *histogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+	h.observe(v, h.findBucket(v))
+}
+
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
+func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
+	i := h.findBucket(v)
+	h.observe(v, i)
+	h.updateExemplar(v, i, e)
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := h.counts[n>>63]
+	coldCounts := h.counts[(^n)>>63]
+
+	waitForCooldown(count, coldCounts)
+
+	his := &dto.Histogram{
+		Bucket:           make([]*dto.Bucket, len(h.upperBounds)),
+		SampleCount:      proto.Uint64(count),
+		SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+		CreatedTimestamp: timestamppb.New(h.lastResetTime),
+	}
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	var cumCount uint64
+	for i, upperBound := range h.upperBounds {
+		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
+		his.Bucket[i] = &dto.Bucket{
+			CumulativeCount: proto.Uint64(cumCount),
+			UpperBound:      proto.Float64(upperBound),
+		}
+		if e := h.exemplars[i].Load(); e != nil {
+			his.Bucket[i].Exemplar = e.(*dto.Exemplar)
+		}
+	}
+	// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
+	if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
+		b := &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(math.Inf(1)),
+			Exemplar:        e.(*dto.Exemplar),
+		}
+		his.Bucket = append(his.Bucket, b)
+	}
+	if h.nativeHistogramSchema > math.MinInt32 {
+		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
+
+		defer func() {
+			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+		}()
+
+		his.ZeroCount = proto.Uint64(zeroBucket)
+		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
+
+		// Add a no-op span to a histogram without observations and with
+		// a zero threshold of zero. Otherwise, a native histogram would
+		// look like a classic histogram to scrapers.
+		if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+			his.PositiveSpan = []*dto.BucketSpan{{
+				Offset: proto.Int32(0),
+				Length: proto.Uint32(0),
+			}}
+		}
+
+		if h.nativeExemplars.isEnabled() {
+			h.nativeExemplars.Lock()
+			his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+			h.nativeExemplars.Unlock()
+		}
+
+	}
+	addAndResetCounts(hotCounts, coldCounts)
+	return nil
+}
+
+// findBucket returns the index of the bucket for the provided value, or
+// len(h.upperBounds) for the +Inf bucket.
+func (h *histogram) findBucket(v float64) int {
+	n := len(h.upperBounds)
+	if n == 0 {
+		return 0
+	}
+
+	// Early exit: if v is less than or equal to the first upper bound, return 0
+	if v <= h.upperBounds[0] {
+		return 0
+	}
+
+	// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+	if v > h.upperBounds[n-1] {
+		return n
+	}
+
+	// For small arrays, use a simple linear search.
+	// The "magic number" 35 is the result of tests on a couple of different (AWS and bare-metal) servers;
+	// see more details here: https://github.com/prometheus/client_golang/pull/1662
+	if n < 35 {
+		for i, bound := range h.upperBounds {
+			if v <= bound {
+				return i
+			}
+		}
+		// If v is greater than all upper bounds, return len(h.upperBounds)
+		return n
+	}
+
+	// For larger arrays, use stdlib's binary search
+	return sort.SearchFloat64s(h.upperBounds, v)
+}
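As a quick, purely illustrative sanity check of the lookup above (hypothetical bounds, not taken from this file):

	// assuming h.upperBounds = []float64{0.1, 0.5, 1}
	h.findBucket(0.05) // 0
	h.findBucket(0.3)  // 1, the first bound it does not exceed
	h.findBucket(2)    // 3 == len(upperBounds), i.e. the +Inf bucket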
+
+// observe is the implementation for Observe without the findBucket part.
+func (h *histogram) observe(v float64, bucket int) {
+	// Do not add to sparse buckets for NaN observations.
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1)
+	hotCounts := h.counts[n>>63]
+	hotCounts.observe(v, bucket, doSparse)
+	if doSparse {
+		h.limitBuckets(hotCounts, v, bucket)
+	}
+}
+
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+	if h.nativeHistogramMaxBuckets == 0 {
+		return // No limit configured.
+	}
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded yet.
+	}
+
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// The hot counts might have been swapped just before we acquired the
+	// lock. Re-fetch the hot counts first...
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hotCounts := h.counts[hotIdx]
+	coldCounts := h.counts[coldIdx]
+	// ...and then check again if we really have to reduce the bucket count.
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded after all.
+	}
+	// Try the various strategies in order.
+	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+		return
+	}
+	// One of the other strategies will happen. To undo what they will do as
+	// soon as enough time has passed to satisfy
+	// h.nativeHistogramMinResetDuration, schedule a reset at the right time
+	// if we haven't done so already.
+	if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+		h.resetScheduled = true
+		h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+	}
+
+	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+		return
+	}
+	h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has been passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
+func (h *histogram) maybeReset(
+	hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
+) bool {
+	// We are using the possibly mocked h.now() rather than
+	// time.Since(h.lastResetTime) to enable testing.
+	if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+		h.resetScheduled || // Do not interfere if a reset is already scheduled.
+		h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+		return false
+	}
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Repeat the latest observation to not lose it completely.
+	cold.observe(value, bucket, true)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	return true
+}
+
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hot := h.counts[hotIdx]
+	cold := h.counts[coldIdx]
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	h.resetScheduled = false
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+		return false
+	}
+	// Find the key of the bucket closest to zero.
+	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+	if smallestNegativeKey < smallestKey {
+		smallestKey = smallestNegativeKey
+	}
+	if smallestKey == math.MaxInt32 {
+		return false
+	}
+	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+		return false // New threshold would exceed the max threshold.
+	}
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// Remove applicable buckets.
+	if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	// Make cold counts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the new zero threshold in the cold counts, too...
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// ...and then merge the newly deleted buckets into the wider zero
+	// bucket.
+	mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			if key == smallestKey {
+				// Merge into hot zero bucket...
+				atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+				// ...and delete from cold counts.
+				coldBuckets.Delete(key)
+				atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+			} else {
+				// Add to corresponding hot bucket...
+				if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+					atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+				}
+				// ...and reset cold bucket.
+				atomic.StoreInt64(bucket, 0)
+			}
+			return true
+		}
+	}
+
+	cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+	return true
+}
+
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that very sparse buckets could lead to a low reduction of the
+// bucket count (or even no reduction at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+	coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+	if coldSchema == -4 {
+		return // Already at lowest resolution.
+	}
+	coldSchema--
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// Play it simple and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+	// Make coldCounts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the schema in the cold counts, too...
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// ...and then merge the cold buckets into the wider hot buckets.
+	merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			// Adjust key to match the bucket to merge into.
+			if key > 0 {
+				key++
+			}
+			key /= 2
+			// Add to corresponding hot bucket.
+			if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+				atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+			}
+			return true
+		}
+	}
+
+	cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+	// Play it simple again and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+	atomic.StoreUint64(&counts.sumBits, 0)
+	atomic.StoreUint64(&counts.count, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+	atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+	for i := range h.upperBounds {
+		atomic.StoreUint64(&counts.buckets[i], 0)
+	}
+	deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
+}
+
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is native, the exemplar is also cached into nativeExemplars,
+// which has a limit and will remove one exemplar when the limit is reached.
+func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
+	if l == nil {
+		return
+	}
+	e, err := newExemplar(v, h.now(), l)
+	if err != nil {
+		panic(err)
+	}
+	h.exemplars[bucket].Store(e)
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+	if doSparse {
+		h.nativeExemplars.addExemplar(e)
+	}
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+	*MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+	return V2.NewHistogramVec(HistogramVecOpts{
+		HistogramOpts:  opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+	desc := V2.NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		opts.VariableLabels,
+		opts.ConstLabels,
+	)
+	return &HistogramVec{
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+			return newHistogram(desc, opts.HistogramOpts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+	h, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+	h, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &HistogramVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+type constHistogram struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	buckets    map[float64]uint64
+	labelPairs []*dto.LabelPair
+	createdTs  *timestamppb.Timestamp
+}
+
+func (h *constHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+	his := &dto.Histogram{
+		CreatedTimestamp: h.createdTs,
+	}
+
+	buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+	his.SampleCount = proto.Uint64(h.count)
+	his.SampleSum = proto.Float64(h.sum)
+	for upperBound, count := range h.buckets {
+		buckets = append(buckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+
+	if len(buckets) > 0 {
+		sort.Sort(buckSort(buckets))
+	}
+	his.Bucket = buckets
+
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
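For context, a minimal sketch of how these constructors are typically used from a custom Collector's Collect method (the collector type, its desc field and all numbers are hypothetical; only the constructors defined above are assumed):

	func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
		// Cumulative counts per upper bound, excluding +Inf; count and sum are fixed.
		buckets := map[float64]uint64{0.05: 121, 0.1: 2403, 0.5: 3221, 1: 4233}
		ch <- prometheus.MustNewConstHistogram(c.desc, 4355, 1892.3, buckets, "GET")
	}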
+
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+	return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+	return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
+
+// pickSchema returns the largest number n between -4 and 8 such that
+// 2^(2^-n) is less than or equal to the provided bucketFactor.
+//
+// Special cases:
+//   - bucketFactor <= 1: panics.
+//   - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+	if bucketFactor <= 1 {
+		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+	}
+	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+	switch {
+	case floor <= -8:
+		return nativeHistogramSchemaMaximum
+	case floor >= 4:
+		return nativeHistogramSchemaMinimum
+	default:
+		return -int32(floor)
+	}
+}
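A worked example of the formula above: for bucketFactor 1.1, log2(log2(1.1)) ≈ log2(0.1375) ≈ -2.86, so the floor is -3 and the returned schema is 3; consecutive bucket boundaries then grow by 2^(2^-3) ≈ 1.0905, which is indeed at most 1.1. For bucketFactor 4, the floor is 1 and the schema is -1, giving a growth factor of 2^(2^1) = 4.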
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+	var ii []int
+	buckets.Range(func(k, v interface{}) bool {
+		ii = append(ii, k.(int))
+		return true
+	})
+	sort.Ints(ii)
+
+	if len(ii) == 0 {
+		return nil, nil
+	}
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		v, _ := buckets.Load(i)
+		count := atomic.LoadInt64(v.(*int64))
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
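To illustrate the span and delta encoding with made-up bucket contents: sparse buckets at keys 0, 1 and 4 with counts 2, 3 and 1 produce a single span, because the gap at keys 2 and 3 is at most two buckets wide and is filled with zero counts instead of opening a new span. The result is one span {Offset: 0, Length: 5} with deltas [2, 1, -3, 0, 1], each delta being the bucket count minus the previous one.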
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+	if existingBucket, ok := buckets.Load(key); ok {
+		// Fast path without allocation.
+		atomic.AddInt64(existingBucket.(*int64), increment)
+		return false
+	}
+	// Bucket doesn't exist yet. Slow path allocating new counter.
+	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+		// The bucket was created concurrently in another goroutine.
+		// Have to increment after all.
+		atomic.AddInt64(actualBucket.(*int64), increment)
+		return false
+	}
+	return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+	return func(k, v interface{}) bool {
+		bucket := v.(*int64)
+		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+			atomic.AddUint32(bucketNumber, 1)
+		}
+		atomic.StoreInt64(bucket, 0)
+		return true
+	}
+}
+
+func deleteSyncMap(m *sync.Map) {
+	m.Range(func(k, v interface{}) bool {
+		m.Delete(k)
+		return true
+	})
+}
+
+func findSmallestKey(m *sync.Map) int {
+	result := math.MaxInt32
+	m.Range(func(k, v interface{}) bool {
+		key := k.(int)
+		if key < result {
+			result = key
+		}
+		return true
+	})
+	return result
+}
+
+func getLe(key int, schema int32) float64 {
+	// Here a bit of context about the behavior for the last bucket counting
+	// regular numbers (called simply "last bucket" below) and the bucket
+	// counting observations of ±Inf (called "inf bucket" below, with a key
+	// one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := key << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := key & ((1 << schema) - 1)
+	frac := nativeHistogramBounds[schema][fracIdx]
+	exp := (key >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
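A few concrete values, assuming nativeHistogramBounds[0] = {0.5} as defined earlier in this file: getLe(0, 0) = Ldexp(0.5, 1) = 1, getLe(1, 0) = 2 and getLe(2, 0) = 4, so with schema 0 the bucket with key k has upper bound 2^k. With schema -1, getLe(1, -1) = Ldexp(1, 2) = 4, i.e. each key then spans two powers of two.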
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+	for count != atomic.LoadUint64(&counts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+}
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+	for {
+		loadedBits := atomic.LoadUint64(bits)
+		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+			break
+		}
+	}
+}
+
+// atomicDecUint32 atomically decrements the uint32 p points to.  See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+	atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+	atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+	atomic.StoreUint64(&cold.count, 0)
+	coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+	atomicAddFloat(&hot.sumBits, coldSum)
+	atomic.StoreUint64(&cold.sumBits, 0)
+	for i := range hot.buckets {
+		atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+		atomic.StoreUint64(&cold.buckets[i], 0)
+	}
+	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
+
+type nativeExemplars struct {
+	sync.Mutex
+
+	// Time-to-live for exemplars. It is set to -1 if exemplars are disabled, that is, NativeHistogramMaxExemplars is below 0.
+	// The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+	ttl time.Duration
+
+	exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+	return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+	if ttl == 0 {
+		ttl = 5 * time.Minute
+	}
+
+	if maxCount == 0 {
+		maxCount = 10
+	}
+
+	if maxCount < 0 {
+		maxCount = 0
+		ttl = -1
+	}
+
+	return nativeExemplars{
+		ttl:       ttl,
+		exemplars: make([]*dto.Exemplar, 0, maxCount),
+	}
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+	if !n.isEnabled() {
+		return
+	}
+
+	n.Lock()
+	defer n.Unlock()
+
+	// As long as the number of exemplars is still below cap(n.exemplars),
+	// insert the new exemplar directly.
+	if len(n.exemplars) < cap(n.exemplars) {
+		var nIdx int
+		for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+			if *e.Value < *n.exemplars[nIdx].Value {
+				break
+			}
+		}
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+		return
+	}
+
+	if len(n.exemplars) == 1 {
+		// When the number of exemplars is 1, then
+		// replace the existing exemplar with the new exemplar.
+		n.exemplars[0] = e
+		return
+	}
+	// From this point on, the number of exemplars is greater than 1.
+
+	// When the number of exemplars exceeds the limit, remove one exemplar.
+	var (
+		ot    = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+		otIdx = -1          // Index of the exemplar with the oldest timestamp.
+
+		md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+		// The insertion point of the new exemplar in the exemplars slice after insertion.
+		// This is calculated purely based on the order of the exemplars by value.
+		// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+		nIdx = -1
+
+		// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+		// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+		// It is calculated in 3 steps:
+		//   1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+		//      That is the following will be true (on log scale):
+		//      either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+		//      the closest values to each other from all pairs.
+		//      For example, suppose the values are distributed like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                   ^--rIdx as this is older.
+		//      Or like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                        ^--rIdx as this is older.
+		//   2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+		//   3. We check if by inserting the new exemplar we would create a closer pair at
+		//      (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+		//      keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+		rIdx = -1
+		cLog float64 // Logarithm of the current exemplar.
+		pLog float64 // Logarithm of the previous exemplar.
+	)
+
+	for i, exemplar := range n.exemplars {
+		// Find the exemplar with the oldest timestamp.
+		if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+			ot = exemplar.Timestamp.AsTime()
+			otIdx = i
+		}
+
+		// Find the index at which to insert the new exemplar.
+		if nIdx == -1 && *e.Value <= *exemplar.Value {
+			nIdx = i
+		}
+
+		// Find the two closest exemplars and pick the one with the older timestamp.
+		pLog = cLog
+		cLog = math.Log(exemplar.GetValue())
+		if i == 0 {
+			continue
+		}
+		diff := math.Abs(cLog - pLog)
+		if md == -1 || diff < md {
+			// The closest exemplar pair is at index: i-1, i.
+			// Choose the exemplar with the older timestamp for replacement.
+			md = diff
+			if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+				rIdx = i
+			} else {
+				rIdx = i - 1
+			}
+		}
+
+	}
+
+	// If all existing exemplars are smaller than the new exemplar,
+	// then the new exemplar is inserted at the end.
+	if nIdx == -1 {
+		nIdx = len(n.exemplars)
+	}
+	// Here, we have the following relationships:
+	// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+	// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+	if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+		// If the oldest exemplar has expired, then replace it with the new exemplar.
+		rIdx = otIdx
+	} else {
+		// In the previous for loop, when calculating the closest pair of exemplars,
+		// we did not take into account the newly inserted exemplar.
+		// So we need to calculate with the newly inserted exemplar again.
+		elog := math.Log(e.GetValue())
+		if nIdx > 0 {
+			diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+			if diff < md {
+				// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+				//                                            v--rIdx
+				// |-----------x-n-----------x----------------x----x-----|
+				//     nIdx-1--^ ^--new exemplar value
+				// Do not make the spread worse, replace nIdx-1 and not rIdx.
+				md = diff
+				rIdx = nIdx - 1
+			}
+		}
+		if nIdx < len(n.exemplars) {
+			diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+			if diff < md {
+				// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+				//                                            v--rIdx
+				// |-----------x-----------n-x----------------x----x-----|
+				//     new exemplar value--^ ^--nIdx
+				// Do not make the spread worse, replace nIdx and not rIdx.
+				rIdx = nIdx
+			}
+		}
+	}
+
+	// Adjust the slice according to rIdx and nIdx.
+	switch {
+	case rIdx == nIdx:
+		n.exemplars[nIdx] = e
+	case rIdx < nIdx:
+		n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+	case rIdx > nIdx:
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+	}
+}
+
+type constNativeHistogram struct {
+	desc *Desc
+	dto.Histogram
+	labelPairs []*dto.LabelPair
+}
+
+func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
+	var bucketPopulationSum int64
+	for _, v := range positiveBuckets {
+		bucketPopulationSum += v
+	}
+	for _, v := range negativeBuckets {
+		bucketPopulationSum += v
+	}
+	bucketPopulationSum += int64(zeroBucket)
+
+	// If the sum of observations is NaN, the number of observations must be greater than or equal to the sum of all bucket counts.
+	// Otherwise, the number of observations must be equal to the sum of all bucket counts.
+
+	if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
+		!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
+		return errors.New("the sum of all bucket populations exceeds the count of observations")
+	}
+	return nil
+}
+
+// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
+// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// zeroBucket counts all (positive and negative) observations in the zero
+// bucket (with an absolute value less than or equal to the current threshold).
+// positiveBuckets and negativeBuckets are separate maps for positive and
+// negative observations. The map's value is an int64, counting observations
+// in that bucket. The map's key is the index of the bucket according to the
+// used Schema. Index 0 is for an upper bound of 1 in positive buckets and for
+// a lower bound of -1 in negative buckets.
+// NewConstNativeHistogram returns an error if
+//   - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
+//   - the schema passed is not between 8 and -4
+//   - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
+//
+// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
+func NewConstNativeHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	positiveBuckets, negativeBuckets map[int]int64,
+	zeroBucket uint64,
+	schema int32,
+	zeroThreshold float64,
+	createdTimestamp time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
+		return nil, errors.New("invalid native histogram schema")
+	}
+	if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
+		return nil, err
+	}
+
+	NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
+	PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
+	ret := &constNativeHistogram{
+		desc: desc,
+		Histogram: dto.Histogram{
+			CreatedTimestamp: timestamppb.New(createdTimestamp),
+			Schema:           &schema,
+			ZeroThreshold:    &zeroThreshold,
+			SampleCount:      &count,
+			SampleSum:        &sum,
+
+			NegativeSpan:  NegativeSpan,
+			NegativeDelta: NegativeDelta,
+
+			PositiveSpan:  PositiveSpan,
+			PositiveDelta: PositiveDelta,
+
+			ZeroCount: proto.Uint64(zeroBucket),
+		},
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}
+	if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
+		ret.PositiveSpan = []*dto.BucketSpan{{
+			Offset: proto.Int32(0),
+			Length: proto.Uint32(0),
+		}}
+	}
+	return ret, nil
+}
+
+// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
+// NewConstNativeHistogram would have returned an error.
+func MustNewConstNativeHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	positiveBuckets, negativeBuckets map[int]int64,
+	zeroBucket uint64,
+	nativeHistogramSchema int32,
+	nativeHistogramZeroThreshold float64,
+	createdTimestamp time.Time,
+	labelValues ...string,
+) Metric {
+	nativehistogram, err := NewConstNativeHistogram(desc,
+		count,
+		sum,
+		positiveBuckets,
+		negativeBuckets,
+		zeroBucket,
+		nativeHistogramSchema,
+		nativeHistogramZeroThreshold,
+		createdTimestamp,
+		labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return nativehistogram
+}
+
+func (h *constNativeHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constNativeHistogram) Write(out *dto.Metric) error {
+	out.Histogram = &h.Histogram
+	out.Label = h.labelPairs
+	return nil
+}
+
+func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
+	if len(buckets) == 0 {
+		return nil, nil
+	}
+	var ii []int
+	for k := range buckets {
+		ii = append(ii, k)
+	}
+	sort.Ints(ii)
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		count := buckets[i]
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
new file mode 100644
index 0000000..1ed5abe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copy/paste to avoid a dependency. Hence this file
+// carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+	"math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+	if a == b {
+		return true
+	}
+	absA := math.Abs(a)
+	absB := math.Abs(b)
+	diff := math.Abs(a - b)
+	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+		return diff < epsilon*minNormalFloat64
+	}
+	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+			return false
+		}
+	}
+	return true
+}
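A quick illustration with made-up values: AlmostEqualFloat64(0.1+0.2, 0.3, 1e-9) returns true, since the difference of roughly 5.6e-17 divided by the sum of magnitudes (about 0.6) is far below epsilon, whereas AlmostEqualFloat64(1.0, 1.1, 1e-9) returns false.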
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 0000000..7bac0da
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,655 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// Maintaining `GetUnifiedDiffString` here because original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+func minInt(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func maxInt(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching".  The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk).  The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence.  This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence.  That's what
+// catches peoples' eyes.  The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff.  This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing:  Basic R-O is cubic time worst case and quadratic time expected
+// case.  SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+	a              []string
+	b              []string
+	b2j            map[string][]int
+	IsJunk         func(string) bool
+	autoJunk       bool
+	bJunk          map[string]struct{}
+	matchingBlocks []Match
+	fullBCount     map[string]int
+	bPopular       map[string]struct{}
+	opCodes        []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+	m := SequenceMatcher{autoJunk: true}
+	m.SetSeqs(a, b)
+	return &m
+}
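A small usage sketch with hypothetical inputs (GetMatchingBlocks is defined further down in this file):

	m := NewMatcher([]string{"a", "b", "c"}, []string{"a", "x", "c"})
	for _, blk := range m.GetMatchingBlocks() {
		fmt.Printf("a[%d:%d] == b[%d:%d]\n", blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size)
	}
	// Expected: a[0:1] == b[0:1], a[2:3] == b[2:3], plus the terminal dummy a[3:3] == b[3:3].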
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+	isJunk func(string) bool,
+) *SequenceMatcher {
+	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+	m.SetSeq1(a)
+	m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+	if &a == &m.a {
+		return
+	}
+	m.a = a
+	m.matchingBlocks = nil
+	m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+	if &b == &m.b {
+		return
+	}
+	m.b = b
+	m.matchingBlocks = nil
+	m.opCodes = nil
+	m.fullBCount = nil
+	m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+	// Populate line -> index mapping
+	b2j := map[string][]int{}
+	for i, s := range m.b {
+		indices := b2j[s]
+		indices = append(indices, i)
+		b2j[s] = indices
+	}
+
+	// Purge junk elements
+	m.bJunk = map[string]struct{}{}
+	if m.IsJunk != nil {
+		junk := m.bJunk
+		for s := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s := range junk {
+			delete(b2j, s)
+		}
+	}
+
+	// Purge remaining popular elements
+	popular := map[string]struct{}{}
+	n := len(m.b)
+	if m.autoJunk && n >= 200 {
+		ntest := n/100 + 1
+		for s, indices := range b2j {
+			if len(indices) > ntest {
+				popular[s] = struct{}{}
+			}
+		}
+		for s := range popular {
+			delete(b2j, s)
+		}
+	}
+	m.bPopular = popular
+	m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+	_, ok := m.bJunk[s]
+	return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//
+//	alo <= i <= i+k <= ahi
+//	blo <= j <= j+k <= bhi
+//
+// and for all (i',j',k') meeting those conditions,
+//
+//	k >= k'
+//	i <= i'
+//	and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block.  Then that block is extended as
+// far as possible by matching (only) junk elements on both sides.  So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+	// CAUTION:  stripping common prefix or suffix would be incorrect.
+	// E.g.,
+	//    ab
+	//    acab
+	// Longest matching block is "ab", but if common prefix is
+	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
+	// strip, so ends up claiming that ab is changed to acab by
+	// inserting "ca" in the middle.  That's minimal but unintuitive:
+	// "it's obvious" that someone inserted "ac" at the front.
+	// Windiff ends up at the same place as diff, but by pairing up
+	// the unique 'b's and then matching the first two 'a's.
+	besti, bestj, bestsize := alo, blo, 0
+
+	// find longest junk-free match
+	// during an iteration of the loop, j2len[j] = length of longest
+	// junk-free match ending with a[i-1] and b[j]
+	j2len := map[int]int{}
+	for i := alo; i != ahi; i++ {
+		// look at all instances of a[i] in b; note that because
+		// b2j has no junk keys, the loop is skipped if a[i] is junk
+		newj2len := map[int]int{}
+		for _, j := range m.b2j[m.a[i]] {
+			// a[i] matches b[j]
+			if j < blo {
+				continue
+			}
+			if j >= bhi {
+				break
+			}
+			k := j2len[j-1] + 1
+			newj2len[j] = k
+			if k > bestsize {
+				besti, bestj, bestsize = i-k+1, j-k+1, k
+			}
+		}
+		j2len = newj2len
+	}
+
+	// Extend the best by non-junk elements on each end.  In particular,
+	// "popular" non-junk elements aren't in b2j, which greatly speeds
+	// the inner loop above, but also means "the best" match so far
+	// doesn't contain any junk *or* popular non-junk elements.
+	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		!m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize++
+	}
+
+	// Now that we have a wholly interesting match (albeit possibly
+	// empty!), we may as well suck up the matching junk on each
+	// side of it too.  Can't think of a good reason not to, and it
+	// saves post-processing the (possibly considerable) expense of
+	// figuring out what to do with it.  In the case of an empty
+	// interesting match, this is clearly the right thing to do,
+	// because no other kind of match is possible in the regions.
+	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize++
+	}
+
+	return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+	if m.matchingBlocks != nil {
+		return m.matchingBlocks
+	}
+
+	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+		match := m.findLongestMatch(alo, ahi, blo, bhi)
+		i, j, k := match.A, match.B, match.Size
+		if match.Size > 0 {
+			if alo < i && blo < j {
+				matched = matchBlocks(alo, i, blo, j, matched)
+			}
+			matched = append(matched, match)
+			if i+k < ahi && j+k < bhi {
+				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+			}
+		}
+		return matched
+	}
+	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+	// It's possible that we have adjacent equal blocks in the
+	// matching_blocks list now.
+	nonAdjacent := []Match{}
+	i1, j1, k1 := 0, 0, 0
+	for _, b := range matched {
+		// Is this block adjacent to i1, j1, k1?
+		i2, j2, k2 := b.A, b.B, b.Size
+		if i1+k1 == i2 && j1+k1 == j2 {
+			// Yes, so collapse them -- this just increases the length of
+			// the first block by the length of the second, and the first
+			// block so lengthened remains the block to compare against.
+			k1 += k2
+		} else {
+			// Not adjacent.  Remember the first block (k1==0 means it's
+			// the dummy we started with), and make the second block the
+			// new block to compare against.
+			if k1 > 0 {
+				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+			}
+			i1, j1, k1 = i2, j2, k2
+		}
+	}
+	if k1 > 0 {
+		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+	}
+
+	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+	m.matchingBlocks = nonAdjacent
+	return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+	if m.opCodes != nil {
+		return m.opCodes
+	}
+	i, j := 0, 0
+	matching := m.GetMatchingBlocks()
+	opCodes := make([]OpCode, 0, len(matching))
+	for _, m := range matching {
+		//  invariant:  we've pumped out correct diffs to change
+		//  a[:i] into b[:j], and the next matching block is
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
+		//  the matching block, and move (i,j) beyond the match
+		ai, bj, size := m.A, m.B, m.Size
+		tag := byte(0)
+		if i < ai && j < bj {
+			tag = 'r'
+		} else if i < ai {
+			tag = 'd'
+		} else if j < bj {
+			tag = 'i'
+		}
+		if tag > 0 {
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+		}
+		i, j = ai+size, bj+size
+		// the list of matching blocks is terminated by a
+		// sentinel with size 0
+		if size > 0 {
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+		}
+	}
+	m.opCodes = opCodes
+	return m.opCodes
+}
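+
+// exampleOpCodes is an editor-added sketch (not part of the upstream go-difflib
+// file) showing the opcode format described above on two small, illustrative
+// sequences.
+func exampleOpCodes() []OpCode {
+	m := NewMatcher(
+		[]string{"a", "b", "c"},
+		[]string{"a", "x", "c"},
+	)
+	// Expected result: ('e',0,1,0,1), ('r',1,2,1,2), ('e',2,3,2,3) –
+	// keep "a", replace "b" with "x", keep "c".
+	return m.GetOpCodes()
+}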
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a list of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+	if n < 0 {
+		n = 3
+	}
+	codes := m.GetOpCodes()
+	if len(codes) == 0 {
+		codes = []OpCode{{'e', 0, 1, 0, 1}}
+	}
+	// Fixup leading and trailing groups if they show no changes.
+	if codes[0].Tag == 'e' {
+		c := codes[0]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
+	}
+	if codes[len(codes)-1].Tag == 'e' {
+		c := codes[len(codes)-1]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
+	}
+	nn := n + n
+	groups := [][]OpCode{}
+	group := []OpCode{}
+	for _, c := range codes {
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		// End the current group and start a new one whenever
+		// there is a large range with no changes.
+		if c.Tag == 'e' && i2-i1 > nn {
+			group = append(group, OpCode{
+				c.Tag, i1, minInt(i2, i1+n),
+				j1, minInt(j2, j1+n),
+			})
+			groups = append(groups, group)
+			group = []OpCode{}
+			i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
+		}
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+	}
+	if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
+		groups = append(groups, group)
+	}
+	return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+	matches := 0
+	for _, m := range m.GetMatchingBlocks() {
+		matches += m.Size
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+	// viewing a and b as multisets, set matches to the cardinality
+	// of their intersection; this counts the number of matches
+	// without regard to order, so is clearly an upper bound
+	if m.fullBCount == nil {
+		m.fullBCount = map[string]int{}
+		for _, s := range m.b {
+			m.fullBCount[s]++
+		}
+	}
+
+	// avail[x] is the number of times x appears in 'b' less the
+	// number of times we've seen it in 'a' so far ... kinda
+	avail := map[string]int{}
+	matches := 0
+	for _, s := range m.a {
+		n, ok := avail[s]
+		if !ok {
+			n = m.fullBCount[s]
+		}
+		avail[s] = n - 1
+		if n > 0 {
+			matches++
+		}
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+	la, lb := len(m.a), len(m.b)
+	return calculateRatio(minInt(la, lb), la+lb)
+}
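+
+// exampleRatioCutoff is an editor-added sketch (not part of the upstream
+// go-difflib file) of the cheap-to-expensive filtering order suggested in the
+// Ratio documentation above: RealQuickRatio and QuickRatio are upper bounds on
+// Ratio, so they can reject candidates early. The cutoff value is illustrative.
+func exampleRatioCutoff(m *SequenceMatcher, cutoff float64) bool {
+	if m.RealQuickRatio() < cutoff {
+		return false // cannot possibly reach the cutoff
+	}
+	if m.QuickRatio() < cutoff {
+		return false // tighter upper bound is still below the cutoff
+	}
+	return m.Ratio() >= cutoff // only now pay for the full computation
+}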
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 1 {
+		return strconv.Itoa(beginning)
+	}
+	if length == 0 {
+		beginning-- // empty ranges begin at line just before the range
+	}
+	return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+	A        []string // First sequence lines
+	FromFile string   // First file name
+	FromDate string   // First file time
+	B        []string // Second sequence lines
+	ToFile   string   // Second file name
+	ToDate   string   // Second file time
+	Eol      string   // Headers end of line, defaults to LF
+	Context  int      // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context.  The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline.  This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times.  Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := fmt.Fprintf(buf, format, args...)
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return w.String(), err
+}
+
+// Split a string on "\n" while preserving the newlines. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
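+
+// exampleUnifiedDiff is an editor-added sketch (not part of the upstream
+// go-difflib file) showing one way to combine SplitLines, UnifiedDiff and
+// GetUnifiedDiffString as described above. File names and contents are
+// illustrative.
+func exampleUnifiedDiff() (string, error) {
+	diff := UnifiedDiff{
+		A:        SplitLines("one\ntwo\nthree\n"),
+		B:        SplitLines("one\ntwo\nfour\n"),
+		FromFile: "a.txt",
+		ToFile:   "b.txt",
+		Context:  3,
+	}
+	return GetUnifiedDiffString(diff)
+}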
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 0000000..a4fa6ea
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+	Matcher *regexp.Regexp
+	Deny    bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the `collectors` package.
+// Use it via collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
+//
+// This is internal, so external users can only use it via the `collectors.WithGoCollector*` methods.
+type GoCollectorOptions struct {
+	DisableMemStatsLikeMetrics bool
+	RuntimeMetricSumForHist    map[string]string
+	RuntimeMetricRules         []GoCollectorRule
+}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
new file mode 100644
index 0000000..d273b66
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -0,0 +1,143 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package internal
+
+import (
+	"math"
+	"path"
+	"runtime/metrics"
+	"strings"
+
+	"github.com/prometheus/common/model"
+)
+
+// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
+// metric description and validates whether the metric is suitable for integration
+// with Prometheus.
+//
+// Returns false if a name could not be produced, or if Prometheus does not understand
+// the runtime/metrics Kind.
+//
+// Note that the main reason a name couldn't be produced is if the runtime/metrics
+// package exports a name with characters outside the valid Prometheus metric name
+// character set. This is theoretically possible, but should never happen in practice.
+// Still, don't rely on it.
+func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
+	namespace := "go"
+
+	comp := strings.SplitN(d.Name, ":", 2)
+	key := comp[0]
+	unit := comp[1]
+
+	// The last path element in the key is the name,
+	// the rest is the subsystem.
+	subsystem := path.Dir(key[1:] /* remove leading / */)
+	name := path.Base(key)
+
+	// subsystem is translated by replacing all / and - with _.
+	subsystem = strings.ReplaceAll(subsystem, "/", "_")
+	subsystem = strings.ReplaceAll(subsystem, "-", "_")
+
+	// unit is translated assuming that the unit contains no
+	// non-ASCII characters.
+	unit = strings.ReplaceAll(unit, "-", "_")
+	unit = strings.ReplaceAll(unit, "*", "_")
+	unit = strings.ReplaceAll(unit, "/", "_per_")
+
+	// name has - replaced with _ and is concatenated with the unit and
+	// other data.
+	name = strings.ReplaceAll(name, "-", "_")
+	name += "_" + unit
+	if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+		name += "_total"
+	}
+
+	// Our current conversion moves to legacy naming, so use legacy validation.
+	valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name)
+	switch d.Kind {
+	case metrics.KindUint64:
+	case metrics.KindFloat64:
+	case metrics.KindFloat64Histogram:
+	default:
+		valid = false
+	}
+	return namespace, subsystem, name, valid
+}
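+
+// exampleRuntimeMetricName is an editor-added sketch (not part of the upstream
+// file) showing the translation performed above for one well-known
+// runtime/metrics entry; the chosen metric is illustrative.
+func exampleRuntimeMetricName() string {
+	d := metrics.Description{Name: "/gc/heap/allocs:bytes", Kind: metrics.KindUint64, Cumulative: true}
+	ns, sub, name, ok := RuntimeMetricsToProm(&d)
+	if !ok {
+		return ""
+	}
+	return ns + "_" + sub + "_" + name // "go_gc_heap_allocs_bytes_total"
+}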
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+	switch unit {
+	case "bytes":
+		// Re-bucket as powers of 2.
+		return reBucketExp(buckets, 2)
+	case "seconds":
+		// Re-bucket as powers of 10 and then merge all buckets greater
+		// than 1 second into the +Inf bucket.
+		b := reBucketExp(buckets, 10)
+		for i := range b {
+			if b[i] <= 1 {
+				continue
+			}
+			b[i] = math.Inf(1)
+			b = b[:i+1]
+			break
+		}
+		return b
+	}
+	return buckets
+}
+
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func reBucketExp(buckets []float64, base float64) []float64 {
+	bucket := buckets[0]
+	var newBuckets []float64
+	// We may see a -Inf here, in which case, add it and skip it
+	// since we risk producing NaNs otherwise.
+	//
+	// We need to preserve -Inf values to maintain runtime/metrics
+	// conventions. We'll strip it out later.
+	if bucket == math.Inf(-1) {
+		newBuckets = append(newBuckets, bucket)
+		buckets = buckets[1:]
+		bucket = buckets[0]
+	}
+	// From now on, bucket should always have a non-Inf value because
+	// Infs are only ever at the ends of the bucket lists, so
+	// arithmetic operations on it are non-NaN.
+	for i := 1; i < len(buckets); i++ {
+		if bucket >= 0 && buckets[i] < bucket*base {
+			// The next bucket we want to include is at least bucket*base.
+			continue
+		} else if bucket < 0 && buckets[i] < bucket/base {
+			// In this case the bucket we're targeting is negative, and since
+			// we're ascending through buckets here, we need to divide to get
+			// closer to zero exponentially.
+			continue
+		}
+		// The +Inf bucket will always be the last one, and we'll always
+		// end up including it: +Inf never satisfies either skip condition
+		// above, so it becomes the final value of bucket and is appended
+		// by the return statement below.
+		newBuckets = append(newBuckets, bucket)
+		bucket = buckets[i]
+	}
+	return append(newBuckets, bucket)
+}
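+
+// exampleReBucket is an editor-added sketch (not part of the upstream file)
+// showing the downsampling described above: for base 2, only boundaries at
+// least a factor of two apart survive, e.g. {1, 2, 3, 4, 6, 8, 16} becomes
+// {1, 2, 4, 8, 16}. The input boundaries are illustrative.
+func exampleReBucket() []float64 {
+	return reBucketExp([]float64{1, 2, 3, 4, 6, 8, 16}, 2)
+}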
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 0000000..6515c11
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,101 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"sort"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+	return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+	return s[i].GetName() < s[j].GetName()
+}
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric
+
+func (s MetricSorter) Len() int {
+	return len(s)
+}
+
+func (s MetricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s MetricSorter) Less(i, j int) bool {
+	if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are
+		// inconsistent. However, we have to deal with it, as
+		// people might use custom collectors or metric family injection
+		// to create inconsistent metrics. So let's simply compare the
+		// number of labels in this case. That will still yield
+		// reproducible sorting.
+		return len(s[i].Label) < len(s[j].Label)
+	}
+	for n, lp := range s[i].Label {
+		vi := lp.GetValue()
+		vj := s[j].Label[n].GetValue()
+		if vi != vj {
+			return vi < vj
+		}
+	}
+
+	// We should never arrive here. Multiple metrics with the same
+	// label set in the same scrape will lead to undefined ingestion
+	// behavior. However, as above, we have to provide stable sorting
+	// here, even for inconsistent metrics. So sort equal metrics
+	// by their timestamp, with missing timestamps (implying "now")
+	// coming last.
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(MetricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 0000000..5fe8d3b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,189 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+//
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use case is the specification of constant label pairs in Opts or
+// when creating a Desc.
+type Labels map[string]string
+
+// LabelConstraint normalizes label values.
+type LabelConstraint func(string) string
+
+// ConstrainedLabel represents a label name and its constraint function used
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+	Name       string
+	Constraint LabelConstraint
+}
+
+// ConstrainableLabels is an interface that allows the creation of labels that
+// can optionally be constrained.
+//
+//	prometheus.V2().NewCounterVec(CounterVecOpts{
+//	  CounterOpts: {...}, // Usual CounterOpts fields
+//	  VariableLabels: ConstrainedLabels{
+//	    {Name: "A"},
+//	    {Name: "B", Constraint: func(v string) string { ... }},
+//	  },
+//	})
+type ConstrainableLabels interface {
+	compile() *compiledLabels
+	labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constraint function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) compile() *compiledLabels {
+	compiled := &compiledLabels{
+		names:            make([]string, len(cls)),
+		labelConstraints: map[string]LabelConstraint{},
+	}
+
+	for i, label := range cls {
+		compiled.names[i] = label.Name
+		if label.Constraint != nil {
+			compiled.labelConstraints[label.Name] = label.Constraint
+		}
+	}
+
+	return compiled
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+	names := make([]string, len(cls))
+	for i, label := range cls {
+		names[i] = label.Name
+	}
+	return names
+}
+
+// UnconstrainedLabels represents a collection of labels without any constraints
+// on their values. Thus, it is simply a collection of label names.
+//
+//	UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+//	ConstrainedLabels {
+//	  { Name: "A" },
+//	  { Name: "B" },
+//	}
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) compile() *compiledLabels {
+	return &compiledLabels{
+		names: uls,
+	}
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+	return uls
+}
+
+type compiledLabels struct {
+	names            []string
+	labelConstraints map[string]LabelConstraint
+}
+
+func (cls *compiledLabels) compile() *compiledLabels {
+	return cls
+}
+
+func (cls *compiledLabels) labelNames() []string {
+	return cls.names
+}
+
+func (cls *compiledLabels) constrain(labelName, value string) string {
+	if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
+		return fn(value)
+	}
+	return value
+}
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
+	return fmt.Errorf(
+		"%w: %q has %d variable labels named %q but %d values %q were provided",
+		errInconsistentCardinality, fqName,
+		len(labels), labels,
+		len(labelValues), labelValues,
+	)
+}
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+	if len(labels) != expectedNumberOfValues {
+		return fmt.Errorf(
+			"%w: expected %d label values but got %d in %#v",
+			errInconsistentCardinality, expectedNumberOfValues,
+			len(labels), labels,
+		)
+	}
+
+	for name, val := range labels {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+		}
+	}
+
+	return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+	if len(vals) != expectedNumberOfValues {
+		// The call below makes vals escape, copy them to avoid that.
+		vals := append([]string(nil), vals...)
+		return fmt.Errorf(
+			"%w: expected %d label values but got %d in %#v",
+			errInconsistentCardinality, expectedNumberOfValues,
+			len(vals), vals,
+		)
+	}
+
+	for _, val := range vals {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label value %q is not valid UTF-8", val)
+		}
+	}
+
+	return nil
+}
+
+func checkLabelName(l string) bool {
+	//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+	return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000..76e59f1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,276 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"math"
+	"sort"
+	"strings"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
+)
+
+var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+	// Desc returns the descriptor for the Metric. This method idempotently
+	// returns the same descriptor throughout the lifetime of the
+	// Metric. The returned descriptor is immutable by contract. A Metric
+	// unable to describe itself must return an invalid descriptor (created
+	// with NewInvalidDesc).
+	Desc() *Desc
+	// Write encodes the Metric into a "Metric" Protocol Buffer data
+	// transmission object.
+	//
+	// Metric implementations must observe concurrency safety as reads of
+	// this metric may occur at any time, and any blocking occurs at the
+	// expense of total performance of rendering all registered
+	// metrics. Ideally, Metric implementations should support concurrent
+	// readers.
+	//
+	// While populating dto.Metric, it is the responsibility of the
+	// implementation to ensure validity of the Metric protobuf (like valid
+	// UTF-8 strings or syntactically valid metric and label names). It is
+	// recommended to sort labels lexicographically. Callers of Write should
+	// still make sure of sorting if they depend on it.
+	Write(*dto.Metric) error
+	// TODO(beorn7): The original rationale of passing in a pre-allocated
+	// dto.Metric protobuf to save allocations has disappeared. The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises.)
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+	ConstLabels Labels
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	if name == "" {
+		return ""
+	}
+
+	sb := strings.Builder{}
+	sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
+
+	if namespace != "" {
+		sb.WriteString(namespace)
+		sb.WriteString("_")
+	}
+
+	if subsystem != "" {
+		sb.WriteString(subsystem)
+		sb.WriteString("_")
+	}
+
+	sb.WriteString(name)
+
+	return sb.String()
+}
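+
+// exampleFQName is an editor-added sketch (not part of the upstream file)
+// showing the joining behavior described above; the component names are
+// illustrative.
+func exampleFQName() []string {
+	return []string{
+		BuildFQName("app", "http", "requests_total"), // "app_http_requests_total"
+		BuildFQName("", "http", "requests_total"),    // "http_requests_total"
+		BuildFQName("app", "http", ""),               // "" – an empty name always yields ""
+	}
+}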
+
+type invalidMetric struct {
+	desc *Desc
+	err  error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+	return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+	Metric
+	t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+	e := m.Metric.Write(pb)
+	pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+	return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric so
+// that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+	return timestampedMetric{Metric: m, t: t}
+}
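+
+// exampleTimestampedMetric is an editor-added sketch (not part of the upstream
+// file) of the mirroring use case mentioned above: wrap a const metric with the
+// timestamp at which the mirrored sample was originally taken. The metric name
+// is illustrative.
+func exampleTimestampedMetric(taken time.Time, value float64) Metric {
+	desc := NewDesc("mirrored_temperature_celsius", "Temperature mirrored from another system.", nil, nil)
+	return NewMetricWithTimestamp(taken, MustNewConstMetric(desc, GaugeValue, value))
+}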
+
+type withExemplarsMetric struct {
+	Metric
+
+	exemplars []*dto.Exemplar
+}
+
+func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
+	if err := m.Metric.Write(pb); err != nil {
+		return err
+	}
+
+	switch {
+	case pb.Counter != nil:
+		pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
+	case pb.Histogram != nil:
+		h := pb.Histogram
+		for _, e := range m.exemplars {
+			if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
+				len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
+				e.GetTimestamp() != nil {
+				h.Exemplars = append(h.Exemplars, e)
+				if len(h.Bucket) == 0 {
+					// Don't proceed to classic buckets if there are none.
+					continue
+				}
+			}
+			// h.Bucket are sorted by UpperBound.
+			i := sort.Search(len(h.Bucket), func(i int) bool {
+				return h.Bucket[i].GetUpperBound() >= e.GetValue()
+			})
+			if i < len(h.Bucket) {
+				h.Bucket[i].Exemplar = e
+			} else {
+				// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
+				b := &dto.Bucket{
+					CumulativeCount: proto.Uint64(h.GetSampleCount()),
+					UpperBound:      proto.Float64(math.Inf(1)),
+					Exemplar:        e,
+				}
+				h.Bucket = append(h.Bucket, b)
+			}
+		}
+	default:
+		// TODO(bwplotka): Implement Gauge?
+		return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
+	}
+
+	return nil
+}
+
+// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
+type Exemplar struct {
+	Value  float64
+	Labels Labels
+	// Optional.
+	// The default value (time.Time{}) indicates that the timestamp is unset,
+	// which should be understood as time.Now() at the moment the metric is created.
+	Timestamp time.Time
+}
+
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
+// exemplars. Exemplars are validated.
+//
+// Only the last applicable exemplar from the list is injected.
+// For example, for a Counter this means the last exemplar is injected.
+// For a Histogram, it means the last applicable exemplar for each bucket is injected.
+// For a Native Histogram, all valid exemplars are injected.
+//
+// NewMetricWithExemplars works best with MustNewConstMetric and
+// MustNewConstHistogram, see example.
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
+	if len(exemplars) == 0 {
+		return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
+	}
+
+	var (
+		now = time.Now()
+		exs = make([]*dto.Exemplar, len(exemplars))
+		err error
+	)
+	for i, e := range exemplars {
+		ts := e.Timestamp
+		if ts.IsZero() {
+			ts = now
+		}
+		exs[i], err = newExemplar(e.Value, ts, e.Labels)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
+}
+
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
+// NewMetricWithExemplars would have returned an error.
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
+	ret, err := NewMetricWithExemplars(m, exemplars...)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
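+
+// exampleCounterWithExemplar is an editor-added sketch (not part of the
+// upstream file) pairing NewMetricWithExemplars with MustNewConstMetric as
+// suggested above: attach a trace exemplar to a const counter. The metric and
+// label names are illustrative.
+func exampleCounterWithExemplar(traceID string, total float64) (Metric, error) {
+	desc := NewDesc("mirrored_requests_total", "Requests mirrored from another system.", nil, nil)
+	return NewMetricWithExemplars(
+		MustNewConstMetric(desc, CounterValue, total),
+		Exemplar{Value: total, Labels: Labels{"trace_id": traceID}},
+	)
+}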
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
new file mode 100644
index 0000000..7c12b21
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "runtime"
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+	n, _ := runtime.ThreadCreateProfile(nil)
+	return float64(n)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
new file mode 100644
index 0000000..7348df0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+	return 1
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 0000000..03773b2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,64 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+	Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+	f(value)
+}
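+
+// exampleGaugeTimer is an editor-added sketch (not part of the upstream file)
+// of the "Gauge as Observer" use case mentioned above: record the duration of
+// fn into a gauge via ObserverFunc and a Timer. The gauge and function are
+// supplied by the caller.
+func exampleGaugeTimer(g Gauge, fn func()) {
+	timer := NewTimer(ObserverFunc(g.Set))
+	defer timer.ObserveDuration()
+	fn()
+}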
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+	GetMetricWith(Labels) (Observer, error)
+	GetMetricWithLabelValues(lvs ...string) (Observer, error)
+	With(Labels) Observer
+	WithLabelValues(...string) Observer
+	CurryWith(Labels) (ObserverVec, error)
+	MustCurryWith(Labels) ObserverVec
+
+	Collector
+}
+
+// ExemplarObserver is implemented by Observers that offer the option of
+// observing a value together with an exemplar. Its ObserveWithExemplar method
+// works like the Observe method of an Observer but also replaces the currently
+// saved exemplar (if any) with a new one, created from the provided value, the
+// current time as timestamp, and the provided Labels. Empty Labels will lead to
+// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
+// left in place. ObserveWithExemplar panics if any of the provided labels are
+// invalid or if the provided labels contain more than 128 runes in total.
+type ExemplarObserver interface {
+	ObserveWithExemplar(value float64, exemplar Labels)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000..e7bce8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,180 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+type processCollector struct {
+	collectFn         func(chan<- Metric)
+	describeFn        func(chan<- *Desc)
+	pidFn             func() (int, error)
+	reportErrors      bool
+	cpuTotal          *Desc
+	openFDs, maxFDs   *Desc
+	vsize, maxVsize   *Desc
+	rss               *Desc
+	startTime         *Desc
+	inBytes, outBytes *Desc
+}
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+	// PidFn returns the PID of the process the collector collects metrics
+	// for. It is called upon each collection. By default, the PID of the
+	// current process is used, as determined on construction time by
+	// calling os.Getpid().
+	PidFn func() (int, error)
+	// If non-empty, each of the collected metrics is prefixed by the
+	// provided string and an underscore ("_").
+	Namespace string
+	// If true, any error encountered during collection is reported as an
+	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+	// and the collected metrics will be incomplete. (Possibly, no metrics
+	// will be collected at all.) While that's usually not desired, it is
+	// appropriate for the common "mix-in" of process metrics, where process
+	// metrics are nice to have, but failing to collect them should not
+	// disrupt the collection of the remaining metrics.
+	ReportErrors bool
+}
+
+// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewProcessCollector instead.
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
+	ns := ""
+	if len(opts.Namespace) > 0 {
+		ns = opts.Namespace + "_"
+	}
+
+	c := &processCollector{
+		reportErrors: opts.ReportErrors,
+		cpuTotal: NewDesc(
+			ns+"process_cpu_seconds_total",
+			"Total user and system CPU time spent in seconds.",
+			nil, nil,
+		),
+		openFDs: NewDesc(
+			ns+"process_open_fds",
+			"Number of open file descriptors.",
+			nil, nil,
+		),
+		maxFDs: NewDesc(
+			ns+"process_max_fds",
+			"Maximum number of open file descriptors.",
+			nil, nil,
+		),
+		vsize: NewDesc(
+			ns+"process_virtual_memory_bytes",
+			"Virtual memory size in bytes.",
+			nil, nil,
+		),
+		maxVsize: NewDesc(
+			ns+"process_virtual_memory_max_bytes",
+			"Maximum amount of virtual memory available in bytes.",
+			nil, nil,
+		),
+		rss: NewDesc(
+			ns+"process_resident_memory_bytes",
+			"Resident memory size in bytes.",
+			nil, nil,
+		),
+		startTime: NewDesc(
+			ns+"process_start_time_seconds",
+			"Start time of the process since unix epoch in seconds.",
+			nil, nil,
+		),
+		inBytes: NewDesc(
+			ns+"process_network_receive_bytes_total",
+			"Number of bytes received by the process over the network.",
+			nil, nil,
+		),
+		outBytes: NewDesc(
+			ns+"process_network_transmit_bytes_total",
+			"Number of bytes sent by the process over the network.",
+			nil, nil,
+		),
+	}
+
+	if opts.PidFn == nil {
+		c.pidFn = getPIDFn()
+	} else {
+		c.pidFn = opts.PidFn
+	}
+
+	// Set up process metric collection if supported by the runtime.
+	if canCollectProcess() {
+		c.collectFn = c.processCollect
+		c.describeFn = c.describe
+	} else {
+		c.collectFn = c.errorCollectFn
+		c.describeFn = c.errorDescribeFn
+	}
+
+	return c
+}
+
+func (c *processCollector) errorCollectFn(ch chan<- Metric) {
+	c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+}
+
+func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
+	if c.reportErrors {
+		ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+	c.collectFn(ch)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+	c.describeFn(ch)
+}
+
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+	if !c.reportErrors {
+		return
+	}
+	if desc == nil {
+		desc = NewInvalidDesc(err)
+	}
+	ch <- NewInvalidMetric(desc, err)
+}
+
+// NewPidFileFn returns a function that retrieves a pid from the specified file.
+// It is meant to be used for the PidFn field in ProcessCollectorOpts.
+func NewPidFileFn(pidFilePath string) func() (int, error) {
+	return func() (int, error) {
+		content, err := os.ReadFile(pidFilePath)
+		if err != nil {
+			return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
+		}
+		pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
+		if err != nil {
+			return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
+		}
+
+		return pid, nil
+	}
+}
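+
+// examplePidFileCollector is an editor-added sketch (not part of the upstream
+// file) showing the intended pairing of NewPidFileFn with ProcessCollectorOpts;
+// the pid file path is illustrative.
+func examplePidFileCollector() Collector {
+	return NewProcessCollector(ProcessCollectorOpts{
+		PidFn:        NewPidFileFn("/var/run/myapp.pid"),
+		ReportErrors: true,
+	})
+}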
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
new file mode 100644
index 0000000..b32c95f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
@@ -0,0 +1,130 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// errNotImplemented is returned by stub functions that replace cgo functions, when cgo
+// isn't available.
+var errNotImplemented = errors.New("not implemented")
+
+type memoryInfo struct {
+	vsize uint64 // Virtual memory size in bytes
+	rss   uint64 // Resident memory size in bytes
+}
+
+func canCollectProcess() bool {
+	return true
+}
+
+func getSoftLimit(which int) (uint64, error) {
+	rlimit := syscall.Rlimit{}
+
+	if err := syscall.Getrlimit(which, &rlimit); err != nil {
+		return 0, err
+	}
+
+	return rlimit.Cur, nil
+}
+
+func getOpenFileCount() (float64, error) {
+	// Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to
+	// return a list of open fds, but that requires a way to call C APIs.  The
+	// benefits, however, include fewer system calls and not failing when at the
+	// open file soft limit.
+
+	if dir, err := os.Open("/dev/fd"); err != nil {
+		return 0.0, err
+	} else {
+		defer dir.Close()
+
+		// Avoid ReadDir(), as it calls stat(2) on each descriptor.  Not only is
+		// that info not used, but KQUEUE descriptors fail stat(2), which causes
+		// the whole method to fail.
+		if names, err := dir.Readdirnames(0); err != nil {
+			return 0.0, err
+		} else {
+			// Subtract 1 to ignore the open /dev/fd descriptor above.
+			return float64(len(names) - 1), nil
+		}
+	}
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil {
+		if len(procs) == 1 {
+			startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9)
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs))
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, c.startTime, err)
+	}
+
+	// The proc structure returned by kern.proc.pid above has an Rusage member,
+	// but it is not filled in, so it needs to be fetched by getrusage(2).  For
+	// that call, the UTime, STime, and Maxrss members are filled out, but not
+	// Ixrss, Idrss, or Isrss for the memory usage.  Memory stats will require
+	// access to the C API to call task_info(TASK_BASIC_INFO).
+	rusage := unix.Rusage{}
+
+	if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
+		cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
+	} else {
+		c.reportError(ch, c.cpuTotal, err)
+	}
+
+	if memInfo, err := getMemory(); err == nil {
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
+	} else if !errors.Is(err, errNotImplemented) {
+		// Don't report an error when support is not compiled in.
+		c.reportError(ch, c.rss, err)
+		c.reportError(ch, c.vsize, err)
+	}
+
+	if fds, err := getOpenFileCount(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
+	} else {
+		c.reportError(ch, c.maxFDs, err)
+	}
+
+	if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
+	} else {
+		c.reportError(ch, c.maxVsize, err)
+	}
+
+	// TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
+	//  be able to get the per-process network send/receive counts.
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
new file mode 100644
index 0000000..d00a243
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
@@ -0,0 +1,84 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#include <mach/mach_vm.h>
+
+// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
+// mach/shared_region.h instead.  But that doesn't define
+// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
+// avoid a warning message when running tests.
+#define GLOBAL_SHARED_TEXT_SEGMENT      0x90000000U
+#define SHARED_DATA_REGION_SIZE         0x10000000
+#define SHARED_TEXT_REGION_SIZE         0x10000000
+
+
+int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
+{
+    // This is lightly adapted from how ps(1) obtains its memory info.
+    // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109
+
+    kern_return_t               error;
+    task_t                      task = MACH_PORT_NULL;
+    mach_task_basic_info_data_t info;
+    mach_msg_type_number_t      info_count = MACH_TASK_BASIC_INFO_COUNT;
+
+    error = task_info(
+                mach_task_self(),
+                MACH_TASK_BASIC_INFO,
+                (task_info_t) &info,
+                &info_count );
+
+    if( error != KERN_SUCCESS )
+    {
+        return error;
+    }
+
+    *rss   = info.resident_size;
+    *vsize = info.virtual_size;
+
+    {
+        vm_region_basic_info_data_64_t    b_info;
+        mach_vm_address_t                 address = GLOBAL_SHARED_TEXT_SEGMENT;
+        mach_vm_size_t                    size;
+        mach_port_t                       object_name;
+
+        /*
+         * try to determine if this task has the split libraries
+         * mapped in... if so, adjust its virtual size down by
+         * the 2 segments that are used for split libraries
+         */
+        info_count = VM_REGION_BASIC_INFO_COUNT_64;
+
+        error = mach_vm_region(
+                    mach_task_self(),
+                    &address,
+                    &size,
+                    VM_REGION_BASIC_INFO_64,
+                    (vm_region_info_t) &b_info,
+                    &info_count,
+                    &object_name);
+
+        if (error == KERN_SUCCESS) {
+            if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
+                *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
+                    *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
+            }
+        }
+    }
+
+    return 0;
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
new file mode 100644
index 0000000..9ac53f9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
@@ -0,0 +1,51 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+package prometheus
+
+/*
+int get_memory_info(unsigned long long *rss, unsigned long long *vs);
+*/
+import "C"
+import "fmt"
+
+func getMemory() (*memoryInfo, error) {
+	var rss, vsize C.ulonglong
+
+	if err := C.get_memory_info(&rss, &vsize); err != 0 {
+		return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
+	}
+
+	return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.maxVsize
+	ch <- c.startTime
+	ch <- c.rss
+	ch <- c.vsize
+
+	/* the process could be collected but not implemented yet
+	ch <- c.inBytes
+	ch <- c.outBytes
+	*/
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
new file mode 100644
index 0000000..3788651
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && !cgo
+
+package prometheus
+
+func getMemory() (*memoryInfo, error) {
+	return nil, errNotImplemented
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.maxVsize
+	ch <- c.startTime
+
+	/* the process could be collected but not implemented yet
+	ch <- c.rss
+	ch <- c.vsize
+	ch <- c.inBytes
+	ch <- c.outBytes
+	*/
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
new file mode 100644
index 0000000..7732b7f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasip1 || js || ios
+// +build wasip1 js ios
+
+package prometheus
+
+func canCollectProcess() bool {
+	return false
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	c.errorCollectFn(ch)
+}
+
+// describe returns all descriptions of the collector for wasip1 and js.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	c.errorDescribeFn(ch)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
new file mode 100644
index 0000000..8074f70
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
@@ -0,0 +1,96 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !js && !wasip1 && !darwin
+// +build !windows,!js,!wasip1,!darwin
+
+package prometheus
+
+import (
+	"github.com/prometheus/procfs"
+)
+
+func canCollectProcess() bool {
+	_, err := procfs.NewDefaultFS()
+	return err == nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	pid, err := c.pidFn()
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	if stat, err := p.Stat(); err == nil {
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+		if startTime, err := stat.StartTime(); err == nil {
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, nil, err)
+	}
+
+	if fds, err := p.FileDescriptorsLen(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if limits, err := p.Limits(); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+	} else {
+		c.reportError(ch, nil, err)
+	}
+
+	if netstat, err := p.Netstat(); err == nil {
+		var inOctets, outOctets float64
+		if netstat.InOctets != nil {
+			inOctets = *netstat.InOctets
+		}
+		if netstat.OutOctets != nil {
+			outOctets = *netstat.OutOctets
+		}
+		ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+		ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+	} else {
+		c.reportError(ch, nil, err)
+	}
+}
+
+// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.maxVsize
+	ch <- c.rss
+	ch <- c.startTime
+	ch <- c.inBytes
+	ch <- c.outBytes
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
new file mode 100644
index 0000000..fa47428
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
@@ -0,0 +1,125 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+func canCollectProcess() bool {
+	return true
+}
+
+var (
+	modpsapi    = syscall.NewLazyDLL("psapi.dll")
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+	procGetProcessMemoryInfo  = modpsapi.NewProc("GetProcessMemoryInfo")
+	procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
+)
+
+type processMemoryCounters struct {
+	// System interface description
+	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
+
+	// Refer to the Golang internal implementation
+	// https://golang.org/src/internal/syscall/windows/psapi_windows.go
+	_                          uint32
+	PageFaultCount             uint32
+	PeakWorkingSetSize         uintptr
+	WorkingSetSize             uintptr
+	QuotaPeakPagedPoolUsage    uintptr
+	QuotaPagedPoolUsage        uintptr
+	QuotaPeakNonPagedPoolUsage uintptr
+	QuotaNonPagedPoolUsage     uintptr
+	PagefileUsage              uintptr
+	PeakPagefileUsage          uintptr
+	PrivateUsage               uintptr
+}
+
+func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
+	mem := processMemoryCounters{}
+	r1, _, err := procGetProcessMemoryInfo.Call(
+		uintptr(handle),
+		uintptr(unsafe.Pointer(&mem)),
+		uintptr(unsafe.Sizeof(mem)),
+	)
+	if r1 != 1 {
+		return mem, err
+	} else {
+		return mem, nil
+	}
+}
+
+func getProcessHandleCount(handle windows.Handle) (uint32, error) {
+	var count uint32
+	r1, _, err := procGetProcessHandleCount.Call(
+		uintptr(handle),
+		uintptr(unsafe.Pointer(&count)),
+	)
+	if r1 != 1 {
+		return 0, err
+	} else {
+		return count, nil
+	}
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	h := windows.CurrentProcess()
+
+	var startTime, exitTime, kernelTime, userTime windows.Filetime
+	err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
+	ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
+
+	mem, err := getProcessMemoryInfo(h)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
+	ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
+
+	handles, err := getProcessHandleCount(h)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
+	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
+}
+
+// describe returns all descriptions of the collector for windows.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.rss
+	ch <- c.startTime
+}
+
+func fileTimeToSeconds(ft windows.Filetime) float64 {
+	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000..c6fd2f5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,1076 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode/utf8"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	"github.com/cespare/xxhash/v2"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+	"google.golang.org/protobuf/proto"
+)
+
+const (
+	// Capacity for the channel to collect metrics and descriptors.
+	capMetricChan = 1000
+	capDescChan   = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
+var (
+	defaultRegistry              = NewRegistry()
+	DefaultRegisterer Registerer = defaultRegistry
+	DefaultGatherer   Gatherer   = defaultRegistry
+)
+
+func init() {
+	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
+	MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+	return &Registry{
+		collectorsByID:  map[uint64]Collector{},
+		descIDs:         map[uint64]struct{}{},
+		dimHashesByName: map[string]uint64{},
+	}
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
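+// A pedantic registry is typically used in tests to validate a custom Collector
+// implementation. A minimal sketch, assuming it runs inside a test function with
+// a *testing.T named t and that myCollector is a hypothetical Collector:
+//
+//	reg := prometheus.NewPedanticRegistry()
+//	if err := reg.Register(myCollector); err != nil {
+//		t.Fatal(err)
+//	}
+//	if _, err := reg.Gather(); err != nil {
+//		t.Errorf("metrics are inconsistent with their descriptors: %v", err)
+//	}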
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use a custom Registerer implementation
+// (e.g. for testing purposes).
+type Registerer interface {
+	// Register registers a new Collector to be included in metrics
+	// collection. It returns an error if the descriptors provided by the
+	// Collector are invalid or if they — in combination with descriptors of
+	// already registered Collectors — do not fulfill the consistency and
+	// uniqueness criteria described in the documentation of metric.Desc.
+	//
+	// If the provided Collector is equal to a Collector already registered
+	// (which includes the case of re-registering the same Collector), the
+	// returned error is an instance of AlreadyRegisteredError, which
+	// contains the previously registered Collector.
+	//
+	// A Collector whose Describe method does not yield any Desc is treated
+	// as unchecked. Registration will always succeed. No check for
+	// re-registering (see previous paragraph) is performed. Thus, the
+	// caller is responsible for not double-registering the same unchecked
+	// Collector, and for providing a Collector that will not cause
+	// inconsistent metrics on collection. (This would lead to scrape
+	// errors.)
+	Register(Collector) error
+	// MustRegister works like Register but registers any number of
+	// Collectors and panics upon the first registration that causes an
+	// error.
+	MustRegister(...Collector)
+	// Unregister unregisters the Collector that equals the Collector passed
+	// in as an argument.  (Two Collectors are considered equal if their
+	// Describe method yields the same set of descriptors.) The function
+	// returns whether a Collector was unregistered. Note that an unchecked
+	// Collector cannot be unregistered (as its Describe method does not
+	// yield any descriptor).
+	//
+	// Note that even after unregistering, it will not be possible to
+	// register a new Collector that is inconsistent with the unregistered
+	// Collector, e.g. a Collector collecting metrics with the same name but
+	// a different help string. The rationale here is that the same registry
+	// instance must only collect consistent metrics throughout its
+	// lifetime.
+	Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+	// Gather calls the Collect method of the registered Collectors and then
+	// gathers the collected metrics into a lexicographically sorted slice
+	// of uniquely named MetricFamily protobufs. Gather ensures that the
+	// returned slice is valid and self-consistent so that it can be used
+	// for valid exposition. As an exception to the strict consistency
+	// requirements described for metric.Desc, Gather will tolerate
+	// different sets of label names for metrics of the same metric family.
+	//
+	// Even if an error occurs, Gather attempts to gather as many metrics as
+	// possible. Hence, if a non-nil error is returned, the returned
+	// MetricFamily slice could be nil (in case of a fatal error that
+	// prevented any meaningful metric collection) or contain a number of
+	// MetricFamily protobufs, some of which might be incomplete, and some
+	// might be missing altogether. The returned error (which might be a
+	// MultiError) explains the details. Note that this is mostly useful for
+	// debugging purposes. If the gathered protobufs are to be used for
+	// exposition in actual monitoring, it is almost always better to not
+	// expose an incomplete result and instead disregard the returned
+	// MetricFamily protobufs in case the returned error is non-nil.
+	Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+	return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+	DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+	return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+	return gf()
+}
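+// For example, a function that returns pre-built MetricFamily protobufs can be
+// exposed as a Gatherer without defining a new type. A minimal sketch, assuming
+// staticMFs is a hypothetical, already populated []*dto.MetricFamily:
+//
+//	g := prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
+//		return staticMFs, nil
+//	})
+//	mfs, err := g.Gather() // hands back staticMFs unchanged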
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
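+// A common way to handle this error is to fall back to the Collector that is
+// already registered. A minimal sketch, assuming the errors package is imported
+// and the counter name is purely illustrative:
+//
+//	reqCounter := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "my_requests_total",
+//		Help: "Total number of requests handled.",
+//	})
+//	if err := prometheus.Register(reqCounter); err != nil {
+//		var are prometheus.AlreadyRegisteredError
+//		if errors.As(err, &are) {
+//			// A Counter with an equal Desc exists; keep using the old one.
+//			reqCounter = are.ExistingCollector.(prometheus.Counter)
+//		} else {
+//			panic(err)
+//		}
+//	}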
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+// Error formats the contained errors as a bullet point list, preceded by the
+// total number of errors. Note that this results in a multi-line string.
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
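+// A minimal sketch of its use in a custom Gatherer, where doA and doB are
+// hypothetical helpers returning errors:
+//
+//	var errs prometheus.MultiError
+//	errs.Append(doA())
+//	errs.Append(doB())
+//	return mfs, errs.MaybeUnwrap() // nil, the single error, or the MultiError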
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // All desc IDs XOR'd together.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer func() {
+		// Drain channel in case of premature return to not leak a goroutine.
+		for range descChan {
+		}
+		r.mtx.Unlock()
+	}()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, XOR it to
+		// the collectorID.  (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists {
+			newDescIDs[desc.id] = struct{}{}
+			collectorID ^= desc.id
+		}
+
+		// Are all the label names and the help string consistent with
+		// previous descriptors of the same name?
+		// First check existing descriptors...
+		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+			}
+			continue
+		}
+
+		// ...then check the new descriptors already seen.
+		if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+			}
+			continue
+		}
+		newDimHashesByName[desc.fqName] = desc.dimHash
+	}
+	// A Collector yielding no Desc at all is considered unchecked.
+	if len(newDescIDs) == 0 {
+		r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+		return nil
+	}
+	if existing, exists := r.collectorsByID[collectorID]; exists {
+		switch e := existing.(type) {
+		case *wrappingCollector:
+			return AlreadyRegisteredError{
+				ExistingCollector: e.unwrapRecursively(),
+				NewCollector:      c,
+			}
+		default:
+			return AlreadyRegisteredError{
+				ExistingCollector: e,
+				NewCollector:      c,
+			}
+		}
+	}
+	// If the collectorID is new, but at least one of the descs existed
+	// before, we are in trouble.
+	if duplicateDescErr != nil {
+		return duplicateDescErr
+	}
+
+	// Only after all tests have passed, actually register.
+	r.collectorsByID[collectorID] = c
+	for hash := range newDescIDs {
+		r.descIDs[hash] = struct{}{}
+	}
+	for name, dimHash := range newDimHashesByName {
+		r.dimHashesByName[name] = dimHash
+	}
+	return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+	var (
+		descChan    = make(chan *Desc, capDescChan)
+		descIDs     = map[uint64]struct{}{}
+		collectorID uint64 // All desc IDs XOR'd together.
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	for desc := range descChan {
+		if _, exists := descIDs[desc.id]; !exists {
+			collectorID ^= desc.id
+			descIDs[desc.id] = struct{}{}
+		}
+	}
+
+	r.mtx.RLock()
+	if _, exists := r.collectorsByID[collectorID]; !exists {
+		r.mtx.RUnlock()
+		return false
+	}
+	r.mtx.RUnlock()
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	delete(r.collectorsByID, collectorID)
+	for id := range descIDs {
+		delete(r.descIDs, id)
+	}
+	// dimHashesByName is left untouched as those must be consistent
+	// throughout the lifetime of a program.
+	return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+	r.mtx.RLock()
+
+	if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
+		// Fast path.
+		r.mtx.RUnlock()
+		return nil, nil
+	}
+
+	var (
+		checkedMetricChan   = make(chan Metric, capMetricChan)
+		uncheckedMetricChan = make(chan Metric, capMetricChan)
+		metricHashes        = map[uint64]struct{}{}
+		wg                  sync.WaitGroup
+		errs                MultiError          // The collected errors to return in the end.
+		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
+	)
+
+	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
+	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+	checkedCollectors := make(chan Collector, len(r.collectorsByID))
+	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
+	for _, collector := range r.collectorsByID {
+		checkedCollectors <- collector
+	}
+	for _, collector := range r.uncheckedCollectors {
+		uncheckedCollectors <- collector
+	}
+	// In case pedantic checks are enabled, we have to copy the map before
+	// giving up the RLock.
+	if r.pedanticChecksEnabled {
+		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+		for id := range r.descIDs {
+			registeredDescIDs[id] = struct{}{}
+		}
+	}
+	r.mtx.RUnlock()
+
+	wg.Add(goroutineBudget)
+
+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
+			default:
+				return
+			}
+			wg.Done()
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
+	go func() {
+		wg.Wait()
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
+	}()
+
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+	defer func() {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
+		}
+	}()
+
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
+	for {
+		select {
+		case metric, ok := <-cmc:
+			if !ok {
+				cmc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				registeredDescIDs,
+			))
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
+		default:
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+				// All collectors are already being worked on or
+				// we have already as many goroutines started as
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						registeredDescIDs,
+					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
+				}
+				break
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
+		// and drained, the contraption above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	// Only report the checked Collectors; unchecked collectors don't report any
+	// Desc.
+	for _, c := range r.collectorsByID {
+		c.Describe(ch)
+	}
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	for _, c := range r.collectorsByID {
+		c.Collect(ch)
+	}
+	for _, c := range r.uncheckedCollectors {
+		c.Collect(ch)
+	}
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
+func WriteToTextfile(filename string, g Gatherer) error {
+	tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
+	if err != nil {
+		return err
+	}
+	defer os.Remove(tmp.Name())
+
+	mfs, err := g.Gather()
+	if err != nil {
+		return err
+	}
+	for _, mf := range mfs {
+		if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
+			return err
+		}
+	}
+	if err := tmp.Close(); err != nil {
+		return err
+	}
+
+	if err := os.Chmod(tmp.Name(), 0o644); err != nil {
+		return err
+	}
+	return os.Rename(tmp.Name(), filename)
+}
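+// A batch job can use it to hand its metrics to the node exporter's textfile
+// collector. A minimal sketch, assuming a hypothetical jobDuration metric and a
+// hypothetical textfile directory, with the log package imported:
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(jobDuration)
+//	// ... run the job and update jobDuration ...
+//	if err := prometheus.WriteToTextfile("/var/lib/node_exporter/textfile/my_job.prom", reg); err != nil {
+//		log.Fatal(err)
+//	}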
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+	metric Metric,
+	metricFamiliesByName map[string]*dto.MetricFamily,
+	metricHashes map[uint64]struct{},
+	registeredDescIDs map[uint64]struct{},
+) error {
+	desc := metric.Desc()
+	// Wrapped metrics collected by an unchecked Collector can have an
+	// invalid Desc.
+	if desc.err != nil {
+		return desc.err
+	}
+	dtoMetric := &dto.Metric{}
+	if err := metric.Write(dtoMetric); err != nil {
+		return fmt.Errorf("error collecting metric %v: %w", desc, err)
+	}
+	metricFamily, ok := metricFamiliesByName[desc.fqName]
+	if ok { // Existing name.
+		if metricFamily.GetHelp() != desc.help {
+			return fmt.Errorf(
+				"collected metric %s %s has help %q but should have %q",
+				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+			)
+		}
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch metricFamily.GetType() {
+		case dto.MetricType_COUNTER:
+			if dtoMetric.Counter == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Counter",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_GAUGE:
+			if dtoMetric.Gauge == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Gauge",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_SUMMARY:
+			if dtoMetric.Summary == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Summary",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_UNTYPED:
+			if dtoMetric.Untyped == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be Untyped",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_HISTOGRAM:
+			if dtoMetric.Histogram == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Histogram",
+					desc.fqName, dtoMetric,
+				)
+			}
+		default:
+			panic("encountered MetricFamily with invalid type")
+		}
+	} else { // New name.
+		metricFamily = &dto.MetricFamily{}
+		metricFamily.Name = proto.String(desc.fqName)
+		metricFamily.Help = proto.String(desc.help)
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch {
+		case dtoMetric.Gauge != nil:
+			metricFamily.Type = dto.MetricType_GAUGE.Enum()
+		case dtoMetric.Counter != nil:
+			metricFamily.Type = dto.MetricType_COUNTER.Enum()
+		case dtoMetric.Summary != nil:
+			metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+		case dtoMetric.Untyped != nil:
+			metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+		case dtoMetric.Histogram != nil:
+			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+		default:
+			return fmt.Errorf("empty metric collected: %s", dtoMetric)
+		}
+		if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+			return err
+		}
+		metricFamiliesByName[desc.fqName] = metricFamily
+	}
+	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+		return err
+	}
+	if registeredDescIDs != nil {
+		// Is the desc registered at all?
+		if _, exist := registeredDescIDs[desc.id]; !exist {
+			return fmt.Errorf(
+				"collected metric %s %s with unregistered descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+			return err
+		}
+	}
+	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+	var (
+		metricFamiliesByName = map[string]*dto.MetricFamily{}
+		metricHashes         = map[uint64]struct{}{}
+		errs                 MultiError // The collected errors to return in the end.
+	)
+
+	for i, g := range gs {
+		mfs, err := g.Gather()
+		if err != nil {
+			multiErr := MultiError{}
+			if errors.As(err, &multiErr) {
+				for _, err := range multiErr {
+					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
+				}
+			} else {
+				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
+			}
+		}
+		for _, mf := range mfs {
+			existingMF, exists := metricFamiliesByName[mf.GetName()]
+			if exists {
+				if existingMF.GetHelp() != mf.GetHelp() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has help %q but should have %q",
+						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+					))
+					continue
+				}
+				if existingMF.GetType() != mf.GetType() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has type %s but should have %s",
+						mf.GetName(), mf.GetType(), existingMF.GetType(),
+					))
+					continue
+				}
+			} else {
+				existingMF = &dto.MetricFamily{}
+				existingMF.Name = mf.Name
+				existingMF.Help = mf.Help
+				existingMF.Type = mf.Type
+				if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				metricFamiliesByName[mf.GetName()] = existingMF
+			}
+			for _, m := range mf.Metric {
+				if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				existingMF.Metric = append(existingMF.Metric, m)
+			}
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
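+// For example, the default registry and a custom registry can be exposed as a
+// single Gatherer. A minimal sketch:
+//
+//	customReg := prometheus.NewRegistry()
+//	// ... register Collectors with customReg ...
+//	merged := prometheus.Gatherers{prometheus.DefaultGatherer, customReg}
+//	mfs, err := merged.Gather() // first occurrence wins; errors come back as a MultiError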
+
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+	var (
+		newName              = mf.GetName()
+		newType              = mf.GetType()
+		newNameWithoutSuffix = ""
+	)
+	switch {
+	case strings.HasSuffix(newName, "_count"):
+		newNameWithoutSuffix = newName[:len(newName)-6]
+	case strings.HasSuffix(newName, "_sum"):
+		newNameWithoutSuffix = newName[:len(newName)-4]
+	case strings.HasSuffix(newName, "_bucket"):
+		newNameWithoutSuffix = newName[:len(newName)-7]
+	}
+	if newNameWithoutSuffix != "" {
+		if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+			switch existingMF.GetType() {
+			case dto.MetricType_SUMMARY:
+				if !strings.HasSuffix(newName, "_bucket") {
+					return fmt.Errorf(
+						"collected metric named %q collides with previously collected summary named %q",
+						newName, newNameWithoutSuffix,
+					)
+				}
+			case dto.MetricType_HISTOGRAM:
+				return fmt.Errorf(
+					"collected metric named %q collides with previously collected histogram named %q",
+					newName, newNameWithoutSuffix,
+				)
+			}
+		}
+	}
+	if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+		if _, ok := mfs[newName+"_count"]; ok {
+			return fmt.Errorf(
+				"collected histogram or summary named %q collides with previously collected metric named %q",
+				newName, newName+"_count",
+			)
+		}
+		if _, ok := mfs[newName+"_sum"]; ok {
+			return fmt.Errorf(
+				"collected histogram or summary named %q collides with previously collected metric named %q",
+				newName, newName+"_sum",
+			)
+		}
+	}
+	if newType == dto.MetricType_HISTOGRAM {
+		if _, ok := mfs[newName+"_bucket"]; ok {
+			return fmt.Errorf(
+				"collected histogram named %q collides with previously collected metric named %q",
+				newName, newName+"_bucket",
+			)
+		}
+	}
+	return nil
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes.
+func checkMetricConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	metricHashes map[uint64]struct{},
+) error {
+	name := metricFamily.GetName()
+
+	// Type consistency with metric family.
+	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+		return fmt.Errorf(
+			"collected metric %q { %s} is not a %s",
+			name, dtoMetric, metricFamily.GetType(),
+		)
+	}
+
+	previousLabelName := ""
+	for _, labelPair := range dtoMetric.GetLabel() {
+		labelName := labelPair.GetName()
+		if labelName == previousLabelName {
+			return fmt.Errorf(
+				"collected metric %q { %s} has two or more labels with the same name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if !checkLabelName(labelName) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label with an invalid name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if dtoMetric.Summary != nil && labelName == quantileLabel {
+			return fmt.Errorf(
+				"collected metric %q { %s} must not have an explicit %q label",
+				name, dtoMetric, quantileLabel,
+			)
+		}
+		if !utf8.ValidString(labelPair.GetValue()) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+				name, dtoMetric, labelName, labelPair.GetValue())
+		}
+		previousLabelName = labelName
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same labels)?
+	h := xxhash.New()
+	h.WriteString(name)
+	h.Write(separatorByteSlice)
+	// Make sure label pairs are sorted. We depend on it for the consistency
+	// check.
+	if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
+		// We cannot sort dtoMetric.Label in place as it is immutable by contract.
+		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+		copy(copiedLabels, dtoMetric.Label)
+		sort.Sort(internal.LabelPairSorter(copiedLabels))
+		dtoMetric.Label = copiedLabels
+	}
+	for _, lp := range dtoMetric.Label {
+		h.WriteString(lp.GetName())
+		h.Write(separatorByteSlice)
+		h.WriteString(lp.GetValue())
+		h.Write(separatorByteSlice)
+	}
+	if dtoMetric.TimestampMs != nil {
+		h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+		h.Write(separatorByteSlice)
+	}
+	hSum := h.Sum64()
+	if _, exists := metricHashes[hSum]; exists {
+		return fmt.Errorf(
+			"collected metric %q { %s} was collected before with the same name and label values",
+			name, dtoMetric,
+		)
+	}
+	metricHashes[hSum] = struct{}{}
+	return nil
+}
+
+func checkDescConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	desc *Desc,
+) error {
+	// Desc help consistency with metric family help.
+	if metricFamily.GetHelp() != desc.help {
+		return fmt.Errorf(
+			"collected metric %s %s has help %q but should have %q",
+			metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+		)
+	}
+
+	// Is the desc consistent with the content of the metric?
+	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
+	copy(lpsFromDesc, desc.constLabelPairs)
+	for _, l := range desc.variableLabels.names {
+		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+			Name: proto.String(l),
+		})
+	}
+	if len(lpsFromDesc) != len(dtoMetric.Label) {
+		return fmt.Errorf(
+			"labels in collected metric %s %s are inconsistent with descriptor %s",
+			metricFamily.GetName(), dtoMetric, desc,
+		)
+	}
+	sort.Sort(internal.LabelPairSorter(lpsFromDesc))
+	for i, lpFromDesc := range lpsFromDesc {
+		lpFromMetric := dtoMetric.Label[i]
+		if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+			lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+			return fmt.Errorf(
+				"labels in collected metric %s %s are inconsistent with descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+	}
+	return nil
+}
+
+var _ TransactionalGatherer = &MultiTRegistry{}
+
+// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
+// transactional gatherers.
+//
+// It is the caller's responsibility to ensure that the registries have mutually
+// exclusive metric families; no deduplication will happen.
+type MultiTRegistry struct {
+	tGatherers []TransactionalGatherer
+}
+
+// NewMultiTRegistry creates MultiTRegistry.
+func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
+	return &MultiTRegistry{
+		tGatherers: tGatherers,
+	}
+}
+
+// Gather implements TransactionalGatherer interface.
+func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
+	errs := MultiError{}
+
+	dFns := make([]func(), 0, len(r.tGatherers))
+	// TODO(bwplotka): Implement concurrency for those?
+	for _, g := range r.tGatherers {
+		// TODO(bwplotka): Check for duplicates?
+		m, d, err := g.Gather()
+		errs.Append(err)
+
+		mfs = append(mfs, m...)
+		dFns = append(dFns, d)
+	}
+
+	// TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
+	sort.Slice(mfs, func(i, j int) bool {
+		return *mfs[i].Name < *mfs[j].Name
+	})
+	return mfs, func() {
+		for _, d := range dFns {
+			d()
+		}
+	}, errs.MaybeUnwrap()
+}
+
+// TransactionalGatherer represents a transactional gatherer that can be notified, via the returned
+// done function, that the memory used by the gathered metric families is no longer needed by the
+// caller. This allows implementations that cache results.
+type TransactionalGatherer interface {
+	// Gather returns metrics in a lexicographically sorted slice
+	// of uniquely named MetricFamily protobufs. Gather ensures that the
+	// returned slice is valid and self-consistent so that it can be used
+	// for valid exposition. As an exception to the strict consistency
+	// requirements described for metric.Desc, Gather will tolerate
+	// different sets of label names for metrics of the same metric family.
+	//
+	// Even if an error occurs, Gather attempts to gather as many metrics as
+	// possible. Hence, if a non-nil error is returned, the returned
+	// MetricFamily slice could be nil (in case of a fatal error that
+	// prevented any meaningful metric collection) or contain a number of
+	// MetricFamily protobufs, some of which might be incomplete, and some
+	// might be missing altogether. The returned error (which might be a
+	// MultiError) explains the details. Note that this is mostly useful for
+	// debugging purposes. If the gathered protobufs are to be used for
+	// exposition in actual monitoring, it is almost always better to not
+	// expose an incomplete result and instead disregard the returned
+	// MetricFamily protobufs in case the returned error is non-nil.
+	//
+	// Important: done is expected to be called (even if an error occurs!) once
+	// the caller no longer needs the returned slice of dto.MetricFamily.
+	Gather() (_ []*dto.MetricFamily, done func(), err error)
+}
+
+// ToTransactionalGatherer transforms a Gatherer into a TransactionalGatherer whose done function is a no-op.
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
+	return &noTransactionGatherer{g: g}
+}
+
+type noTransactionGatherer struct {
+	g Gatherer
+}
+
+// Gather implements TransactionalGatherer interface.
+func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
+	mfs, err := g.g.Gather()
+	return mfs, func() {}, err
+}
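+// For example, plain Registries can participate in a MultiTRegistry this way.
+// A minimal sketch:
+//
+//	regA := prometheus.NewRegistry()
+//	regB := prometheus.NewRegistry()
+//	multi := prometheus.NewMultiTRegistry(
+//		prometheus.ToTransactionalGatherer(regA),
+//		prometheus.ToTransactionalGatherer(regB),
+//	)
+//	mfs, done, err := multi.Gather()
+//	defer done() // call done even if err is non-nil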
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000..ac5203c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,830 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/beorn7/perks/quantile"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations. However, the default behavior will change in the
+// upcoming v1.0.0 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the summary. Observations are
+	// usually positive or zero. Negative observations are accepted but
+	// prevent current versions of Prometheus from properly detecting
+	// counter resets in the sum of observations. See
+	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+	// for details.
+	Observe(float64)
+}
+
+var errQuantileLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in summaries", quantileLabel,
+)
+
+// Default values for SummaryOpts.
+const (
+	// DefMaxAge is the default duration for which observations stay
+	// relevant.
+	DefMaxAge time.Duration = 10 * time.Minute
+	// DefAgeBuckets is the default number of buckets used to calculate the
+	// age of observations.
+	DefAgeBuckets = 5
+	// DefBufCap is the standard buffer size for collecting Summary observations.
+	DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v1.0.0 of the library.
+type SummaryOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Summary (created by joining these components with
+	// "_"). Only Name is mandatory; the others merely help to structure the
+	// name. Note that the fully-qualified name of the Summary must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Summary.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// Due to the way a Summary is represented in the Prometheus text format
+	// and how it is handled by the Prometheus server internally, “quantile”
+	// is an illegal label name. Construction of a Summary or SummaryVec
+	// will panic if this label name is used in ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error. If Objectives[q] = e, then the value reported for q
+	// will be the φ-quantile value for some φ between q-e and q+e.  The
+	// default value is an empty map, resulting in a summary without
+	// quantiles.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Only applies to pre-calculated quantiles, does not
+	// apply to _sum and _count. Must be positive. The default value is
+	// DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+}
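+
+// A minimal usage sketch, following the recommendation above to set the
+// Objectives field explicitly (illustrative only; the metric name, help text,
+// and objective values are hypothetical):
+//
+//	requestDuration := NewSummary(SummaryOpts{
+//		Name:       "http_request_duration_seconds",
+//		Help:       "HTTP request latencies in seconds.",
+//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//	})
+//	requestDuration.Observe(0.42)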
+
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left at its default value.
+type SummaryVecOpts struct {
+	SummaryOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
+// Problem with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+	return newSummary(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+	if len(desc.variableLabels.names) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
+	}
+
+	for _, n := range desc.variableLabels.names {
+		if n == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+
+	if opts.Objectives == nil {
+		opts.Objectives = map[float64]float64{}
+	}
+
+	if opts.MaxAge < 0 {
+		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+	}
+	if opts.MaxAge == 0 {
+		opts.MaxAge = DefMaxAge
+	}
+
+	if opts.AgeBuckets == 0 {
+		opts.AgeBuckets = DefAgeBuckets
+	}
+
+	if opts.BufCap == 0 {
+		opts.BufCap = DefBufCap
+	}
+
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	if len(opts.Objectives) == 0 {
+		// Use the lock-free implementation of a Summary without objectives.
+		s := &noObjectivesSummary{
+			desc:       desc,
+			labelPairs: MakeLabelPairs(desc, labelValues),
+			counts:     [2]*summaryCounts{{}, {}},
+		}
+		s.init(s) // Init self-collection.
+		s.createdTs = timestamppb.New(opts.now())
+		return s
+	}
+
+	s := &summary{
+		desc: desc,
+		now:  opts.now,
+
+		objectives:       opts.Objectives,
+		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+		labelPairs: MakeLabelPairs(desc, labelValues),
+
+		hotBuf:         make([]float64, 0, opts.BufCap),
+		coldBuf:        make([]float64, 0, opts.BufCap),
+		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+	}
+	s.headStreamExpTime = opts.now().Add(s.streamDuration)
+	s.hotBufExpTime = s.headStreamExpTime
+
+	for i := uint32(0); i < opts.AgeBuckets; i++ {
+		s.streams = append(s.streams, s.newStream())
+	}
+	s.headStream = s.streams[0]
+
+	for qu := range s.objectives {
+		s.sortedObjectives = append(s.sortedObjectives, qu)
+	}
+	sort.Float64s(s.sortedObjectives)
+
+	s.init(s) // Init self-collection.
+	s.createdTs = timestamppb.New(opts.now())
+	return s
+}
+
+type summary struct {
+	selfCollector
+
+	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+	mtx    sync.Mutex // Protects every other moving part.
+	// Lock bufMtx before mtx if both are needed.
+
+	desc *Desc
+
+	now func() time.Time
+
+	objectives       map[float64]float64
+	sortedObjectives []float64
+
+	labelPairs []*dto.LabelPair
+
+	sum float64
+	cnt uint64
+
+	hotBuf, coldBuf []float64
+
+	streams                          []*quantile.Stream
+	streamDuration                   time.Duration
+	headStream                       *quantile.Stream
+	headStreamIdx                    int
+	headStreamExpTime, hotBufExpTime time.Time
+
+	createdTs *timestamppb.Timestamp
+}
+
+func (s *summary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+	s.bufMtx.Lock()
+	defer s.bufMtx.Unlock()
+
+	now := s.now()
+	if now.After(s.hotBufExpTime) {
+		s.asyncFlush(now)
+	}
+	s.hotBuf = append(s.hotBuf, v)
+	if len(s.hotBuf) == cap(s.hotBuf) {
+		s.asyncFlush(now)
+	}
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{
+		CreatedTimestamp: s.createdTs,
+	}
+	qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+	s.bufMtx.Lock()
+	s.mtx.Lock()
+	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+	s.swapBufs(s.now())
+	s.bufMtx.Unlock()
+
+	s.flushColdBuf()
+	sum.SampleCount = proto.Uint64(s.cnt)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for _, rank := range s.sortedObjectives {
+		var q float64
+		if s.headStream.Count() == 0 {
+			q = math.NaN()
+		} else {
+			q = s.headStream.Query(rank)
+		}
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	s.mtx.Unlock()
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+	return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+	return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction.  But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type summaryCounts struct {
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+}
+
+type noObjectivesSummary struct {
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finishes by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot and cold counts under the
+	// writeMtx lock. A cooldown is awaited (while locked) by comparing the
+	// number of observations with the initiation count. Once they match, the
+	// last observation on the now-cool counts has completed. All cool fields
+	// must then be merged into the new hot counts before writeMtx is released.
+
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the summaryCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*summaryCounts
+
+	labelPairs []*dto.LabelPair
+
+	createdTs *timestamppb.Timestamp
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+	// We increment s.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
+
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	s.writeMtx.Lock()
+	defer s.writeMtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which of the two counts is hot. The
+	// complement is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
+
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	sum := &dto.Summary{
+		SampleCount:      proto.Uint64(count),
+		SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+		CreatedTimestamp: s.createdTs,
+	}
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
+	atomic.AddUint64(&hotCounts.count, count)
+	atomic.StoreUint64(&coldCounts.count, 0)
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			atomic.StoreUint64(&coldCounts.sumBits, 0)
+			break
+		}
+	}
+	return nil
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+	return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+	return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+	*MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	return V2.NewSummaryVec(SummaryVecOpts{
+		SummaryOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+	for _, ln := range opts.VariableLabels.labelNames() {
+		if ln == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	desc := V2.NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		opts.VariableLabels,
+		opts.ConstLabels,
+	)
+	return &SummaryVec{
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+			return newSummary(desc, opts.SummaryOpts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
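+
+// A sketch of currying as described for CurryWith above (illustrative only;
+// the metric and label names are hypothetical):
+//
+//	latency := NewSummaryVec(SummaryOpts{
+//		Name: "request_duration_seconds",
+//		Help: "Request latencies in seconds.",
+//	}, []string{"handler", "method"})
+//	getOnly := latency.MustCurryWith(Labels{"method": "GET"})
+//	getOnly.WithLabelValues("index").Observe(0.07)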
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+	createdTs  *timestamppb.Timestamp
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{
+		CreatedTimestamp: s.createdTs,
+	}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
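+
+// A sketch of the throw-away usage described for NewConstSummary above
+// (illustrative only; the collector type, its desc field, and the values are
+// hypothetical):
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstSummary(
+//			c.desc, 4711, 403.42,
+//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//		)
+//	}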
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 0000000..52344fe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,81 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+	begin    time.Time
+	observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. If the Observer implements ExemplarObserver, passing an
+// exemplar later on is also supported.
+// Timer is usually used to time a function call in the
+// following way:
+//
+//	func TimeMe() {
+//	    timer := NewTimer(myHistogram)
+//	    defer timer.ObserveDuration()
+//	    // Do actual work.
+//	}
+//
+// or
+//
+//	func TimeMeWithExemplar() {
+//	    timer := NewTimer(myHistogram)
+//	    defer timer.ObserveDurationWithExemplar(exemplar)
+//	    // Do actual work.
+//	}
+func NewTimer(o Observer) *Timer {
+	return &Timer{
+		begin:    time.Now(),
+		observer: o,
+	}
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go 1.9+.
+func (t *Timer) ObserveDuration() time.Duration {
+	d := time.Since(t.begin)
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe the exemplar with the duration, unless the exemplar is nil or the
+// provided Observer cannot be cast to ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+	d := time.Since(t.begin)
+	eo, ok := t.observer.(ExemplarObserver)
+	if ok && exemplar != nil {
+		eo.ObserveWithExemplar(d.Seconds(), exemplar)
+		return d
+	}
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000..0f9ce63
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+	Metric
+	Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), UntypedValue, function)
+}
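+
+// A minimal usage sketch (illustrative only; the metric name and the
+// readExternalValue helper are hypothetical):
+//
+//	uf := NewUntypedFunc(UntypedOpts{
+//		Name: "external_subsystem_value",
+//		Help: "Value mirrored from an external system of unknown type.",
+//	}, func() float64 { return readExternalValue() })
+//	MustRegister(uf)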
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000..cc23011
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,274 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+	"unicode/utf8"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum. Use UntypedValue to mark a metric
+// with an unknown type.
+const (
+	_ ValueType = iota
+	CounterValue
+	GaugeValue
+	UntypedValue
+)
+
+var (
+	CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
+	GaugeMetricTypePtr   = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
+	UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
+)
+
+func (v ValueType) ToDTO() *dto.MetricType {
+	switch v {
+	case CounterValue:
+		return CounterMetricTypePtr
+	case GaugeValue:
+		return GaugeMetricTypePtr
+	default:
+		return UntypedMetricTypePtr
+	}
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+	selfCollector
+
+	desc       *Desc
+	valType    ValueType
+	function   func() float64
+	labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+	result := &valueFunc{
+		desc:       desc,
+		valType:    valueType,
+		function:   function,
+		labelPairs: MakeLabelPairs(desc, nil),
+	}
+	result.init(result)
+	return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+	return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+
+	metric := &dto.Metric{}
+	if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
+		return nil, err
+	}
+
+	return &constMetric{
+		desc:   desc,
+		metric: metric,
+	}, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+	m, err := NewConstMetric(desc, valueType, value, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
+// with created timestamp set and returns an error for other metric types.
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	switch valueType {
+	case CounterValue:
+		break
+	default:
+		return nil, errors.New("created timestamps are only supported for counters")
+	}
+
+	metric := &dto.Metric{}
+	if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
+		return nil, err
+	}
+
+	return &constMetric{
+		desc:   desc,
+		metric: metric,
+	}, nil
+}
+
+// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
+// NewConstMetricWithCreatedTimestamp would have returned an error.
+func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
+	m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type constMetric struct {
+	desc   *Desc
+	metric *dto.Metric
+}
+
+func (m *constMetric) Desc() *Desc {
+	return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+	out.Label = m.metric.Label
+	out.Counter = m.metric.Counter
+	out.Gauge = m.metric.Gauge
+	out.Untyped = m.metric.Untyped
+	return nil
+}
+
+func populateMetric(
+	t ValueType,
+	v float64,
+	labelPairs []*dto.LabelPair,
+	e *dto.Exemplar,
+	m *dto.Metric,
+	ct *timestamppb.Timestamp,
+) error {
+	m.Label = labelPairs
+	switch t {
+	case CounterValue:
+		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
+	case GaugeValue:
+		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+	case UntypedValue:
+		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+	default:
+		return fmt.Errorf("encountered unknown type %v", t)
+	}
+	return nil
+}
+
+// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
+// variable and constant labels in the provided Desc. The values for the
+// variable labels are defined by the labelValues slice, which must be in the
+// same order as the corresponding variable labels in the Desc.
+//
+// This function is only needed for custom Metric implementations. See MetricVec
+// example.
+func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+	totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
+	if totalLen == 0 {
+		// Super fast path.
+		return nil
+	}
+	if len(desc.variableLabels.names) == 0 {
+		// Moderately fast path.
+		return desc.constLabelPairs
+	}
+	labelPairs := make([]*dto.LabelPair, 0, totalLen)
+	for i, l := range desc.variableLabels.names {
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(l),
+			Value: proto.String(labelValues[i]),
+		})
+	}
+	labelPairs = append(labelPairs, desc.constLabelPairs...)
+	sort.Sort(internal.LabelPairSorter(labelPairs))
+	return labelPairs
+}
+
+// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
+const ExemplarMaxRunes = 128
+
+// newExemplar creates a new dto.Exemplar from the provided values. An error is
+// returned if any of the label names or values are invalid or if the total
+// number of runes in the label names and values exceeds ExemplarMaxRunes.
+func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
+	e := &dto.Exemplar{}
+	e.Value = proto.Float64(value)
+	tsProto := timestamppb.New(ts)
+	if err := tsProto.CheckValid(); err != nil {
+		return nil, err
+	}
+	e.Timestamp = tsProto
+	labelPairs := make([]*dto.LabelPair, 0, len(l))
+	var runes int
+	for name, value := range l {
+		if !checkLabelName(name) {
+			return nil, fmt.Errorf("exemplar label name %q is invalid", name)
+		}
+		runes += utf8.RuneCountInString(name)
+		if !utf8.ValidString(value) {
+			return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
+		}
+		runes += utf8.RuneCountInString(value)
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(name),
+			Value: proto.String(value),
+		})
+	}
+	if runes > ExemplarMaxRunes {
+		return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
+	}
+	e.Label = labelPairs
+	return e, nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000..487b466
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,709 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/prometheus/common/model"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. MetricVec is not used directly but as a building block
+// for implementations of vectors of a given metric type, like GaugeVec,
+// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
+// used for custom Metric implementations.
+//
+// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
+// FooVec and initialize it with NewMetricVec. Implement wrappers for
+// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
+// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
+// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
+// add the convenience methods WithLabelValues, With, and MustCurryWith, which
+// panic instead of returning errors. See also the MetricVec example.
+type MetricVec struct {
+	*metricMap
+
+	curry []curriedLabelValue
+
+	// hashAdd and hashAddByte can be replaced for testing collision handling.
+	hashAdd     func(h uint64, s string) uint64
+	hashAddByte func(h uint64, b byte) uint64
+}
+
+// NewMetricVec returns an initialized MetricVec.
+func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+	return &MetricVec{
+		metricMap: &metricMap{
+			metrics:   map[uint64][]metricWithLabelValues{},
+			desc:      desc,
+			newMetric: newMetric,
+		},
+		hashAdd:     hashAdd,
+		hashAddByte: hashAddByte,
+	}
+}
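+
+// A sketch of the wrapper pattern described in the MetricVec documentation
+// above (illustrative only; Foo and FooVec are hypothetical custom types):
+//
+//	type FooVec struct{ *MetricVec }
+//
+//	func (v *FooVec) GetMetricWithLabelValues(lvs ...string) (Foo, error) {
+//		m, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+//		if m != nil {
+//			return m.(Foo), err
+//		}
+//		return nil, err
+//	}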
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
+
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return false
+	}
+
+	return m.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return false
+	}
+
+	return m.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
+// passed in as labels. The order of the labels does not matter.
+// It returns the number of metrics deleted.
+//
+// Note that curried labels will never be matched if deleting from the curried vector.
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
+	return m.deleteByLabels(labels, m.curry)
+}
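+
+// A sketch of a partial-match deletion (illustrative only; the label name and
+// value are hypothetical): vec.DeletePartialMatch(Labels{"code": "404"})
+// removes every metric whose label set contains code="404", regardless of its
+// remaining labels, and returns the number of metrics deleted.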
+
+// Without explicit forwarding of Describe, Collect, Reset, those methods won't
+// show up in GoDoc.
+
+// Describe implements Collector.
+func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() { m.metricMap.Reset() }
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the MetricVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+//
+// Note that CurryWith is usually not called directly but through a wrapper
+// around MetricVec, implementing a vector for a specific Metric
+// implementation, for example GaugeVec.
+func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
+	var (
+		newCurry []curriedLabelValue
+		oldCurry = m.curry
+		iCurry   int
+	)
+	for i, labelName := range m.desc.variableLabels.names {
+		val, ok := labels[labelName]
+		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+			if ok {
+				return nil, fmt.Errorf("label name %q is already curried", labelName)
+			}
+			newCurry = append(newCurry, oldCurry[iCurry])
+			iCurry++
+		} else {
+			if !ok {
+				continue // Label stays uncurried.
+			}
+			newCurry = append(newCurry, curriedLabelValue{
+				i,
+				m.desc.variableLabels.constrain(labelName, val),
+			})
+		}
+	}
+	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+	}
+
+	return &MetricVec{
+		metricMap:   m.metricMap,
+		curry:       newCurry,
+		hashAdd:     m.hashAdd,
+		hashAddByte: m.hashAddByte,
+	}, nil
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created (by
+// calling the newMetric function provided during construction of the
+// MetricVec).
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it in its initial state.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+//
+// Note that GetMetricWithLabelValues is usually not called directly but through
+// a wrapper around MetricVec, implementing a vector for a specific Metric
+// implementation, for example GaugeVec.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+//
+// Note that GetMetricWith is usually not called directly but through a wrapper
+// around MetricVec, implementing a vector for a specific Metric implementation,
+// for example GaugeVec.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+	if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h             = hashNew()
+		curry         = m.curry
+		iVals, iCurry int
+	)
+	for i := 0; i < len(m.desc.variableLabels.names); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			h = m.hashAdd(h, vals[iVals])
+			iVals++
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+	if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h      = hashNew()
+		curry  = m.curry
+		iCurry int
+	)
+	for i, labelName := range m.desc.variableLabels.names {
+		val, ok := labels[labelName]
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if ok {
+				return 0, fmt.Errorf("label name %q is already curried", labelName)
+			}
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			if !ok {
+				return 0, fmt.Errorf("label name %q missing in label map", labelName)
+			}
+			h = m.hashAdd(h, val)
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+	values []string
+	metric Metric
+}
+
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+	index int
+	value string
+}
+
+// metricMap is a helper for MetricVec and is shared between differently curried
+// MetricVecs.
+type metricMap struct {
+	mtx       sync.RWMutex // Protects metrics.
+	metrics   map[uint64][]metricWithLabelValues
+	desc      *Desc
+	newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
+	ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *metricMap) Collect(ch chan<- Metric) {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	for _, metrics := range m.metrics {
+		for _, metric := range metrics {
+			ch <- metric.metric
+		}
+	}
+}
+
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for h := range m.metrics {
+		delete(m.metrics, h)
+	}
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *metricMap) deleteByHashWithLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+
+	i := findMetricWithLabelValues(metrics, lvs, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		old := metrics
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+		old[len(old)-1] = metricWithLabelValues{}
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use lvs to select a metric and remove
+// only that metric.
+func (m *metricMap) deleteByHashWithLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+	i := findMetricWithLabels(m.desc, metrics, labels, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		old := metrics
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+		old[len(old)-1] = metricWithLabelValues{}
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// deleteByLabels deletes a metric if the given labels are present in the metric.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	var numDeleted int
+
+	for h, metrics := range m.metrics {
+		i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+		if i >= len(metrics) {
+			// Didn't find matching labels in this metric slice.
+			continue
+		}
+		delete(m.metrics, h)
+		numDeleted++
+	}
+
+	return numDeleted
+}
+
+// findMetricWithPartialLabels returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithPartialLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchPartialLabels(desc, metric.values, labels, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and returns
+// its index, or len(items), plus a boolean indicating whether the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+	for i, l := range items {
+		if l == target {
+			return i, true
+		}
+	}
+	return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
+// and returns whether it matches either the "base" value or the curried value accordingly.
+// It also indicates whether the match is against a curried or uncurried value.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
+	for _, curriedValue := range curry {
+		if curriedValue.index == index {
+			// This label was curried. See if the curried value matches our target.
+			return curriedValue.value == targetValue, true
+		}
+	}
+	// This label was not curried. See if the current value matches our target label.
+	return values[index] == targetValue, false
+}
+
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+	for l, v := range labels {
+		// Check if the target label exists in our metrics and get the index.
+		varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
+		if validLabel {
+			// Check the value of that label against the target value.
+			// We don't consider curried values in partial matches.
+			matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
+			if matches && !curried {
+				continue
+			}
+		}
+		return false
+	}
+	return true
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+	hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	if !ok {
+		inlinedLVs := inlineLabelValues(lvs, curry)
+		metric = m.newMetric(inlinedLVs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabels(
+	hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
+	if !ok {
+		lvs := extractLabelValues(m.desc, labels, curry)
+		metric = m.newMetric(lvs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithLabelValues(
+	metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchLabelValues(metric.values, lvs, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func findMetricWithLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchLabels(desc, metric.values, labels, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
+	if len(values) != len(lvs)+len(curry) {
+		return false
+	}
+	var iLVs, iCurry int
+	for i, v := range values {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if v != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
+		if v != lvs[iLVs] {
+			return false
+		}
+		iLVs++
+	}
+	return true
+}
+
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+	if len(values) != len(labels)+len(curry) {
+		return false
+	}
+	iCurry := 0
+	for i, k := range desc.variableLabels.names {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if values[i] != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
+		if values[i] != labels[k] {
+			return false
+		}
+	}
+	return true
+}
+
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(labels)+len(curry))
+	iCurry := 0
+	for i, k := range desc.variableLabels.names {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
+		labelValues[i] = labels[k]
+	}
+	return labelValues
+}
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(lvs)+len(curry))
+	var iCurry, iLVs int
+	for i := range labelValues {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
+		labelValues[i] = lvs[iLVs]
+		iLVs++
+	}
+	return labelValues
+}
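+
+// For illustration: with lvs = []string{"GET", "200"} and curry containing a
+// single value {index: 1, value: "eu-west"}, inlineLabelValues returns
+// []string{"GET", "eu-west", "200"}; curried values are spliced back into
+// their original label positions, mirroring the matching done in
+// matchLabelValues above.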
+
+var labelsPool = &sync.Pool{
+	New: func() interface{} {
+		return make(Labels)
+	},
+}
+
+func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
+	if len(desc.variableLabels.labelConstraints) == 0 {
+		// Fast path when there are no constraints
+		return labels, func() {}
+	}
+
+	constrainedLabels := labelsPool.Get().(Labels)
+	for l, v := range labels {
+		constrainedLabels[l] = desc.variableLabels.constrain(l, v)
+	}
+
+	return constrainedLabels, func() {
+		for k := range constrainedLabels {
+			delete(constrainedLabels, k)
+		}
+		labelsPool.Put(constrainedLabels)
+	}
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+	if len(desc.variableLabels.labelConstraints) == 0 {
+		// Fast path when there are no constraints
+		return lvs
+	}
+
+	constrainedValues := make([]string, len(lvs))
+	var iCurry, iLVs int
+	for i := 0; i < len(lvs)+len(curry); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			iCurry++
+			continue
+		}
+
+		if i < len(desc.variableLabels.names) {
+			constrainedValues[iLVs] = desc.variableLabels.constrain(
+				desc.variableLabels.names[i],
+				lvs[iLVs],
+			)
+		} else {
+			constrainedValues[iLVs] = lvs[iLVs]
+		}
+		iLVs++
+	}
+	return constrainedValues
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 0000000..42bc3a8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access the experimental API that
+// might be present in v2 of client_golang someday. It offers extended
+// functionality of v1 with a slightly changed API. It is acceptable to mix
+// pieces from v1 (e.g. `prometheus.NewGauge`) and from v2
+// (e.g. `prometheus.V2.NewDesc`) in the same codebase.
+var V2 = v2{}
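+
+// An illustrative sketch (not upstream code) of the mixing described above,
+// assuming the UnconstrainedLabels helper from this package's label-constraint
+// support; metric and label names are examples only:
+//
+//	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{ // v1 API
+//		Name: "queue_depth",
+//		Help: "Current depth of the work queue.",
+//	})
+//	desc := prometheus.V2.NewDesc( // experimental v2 API
+//		"http_requests_total",
+//		"Total HTTP requests.",
+//		prometheus.UnconstrainedLabels{"method"},
+//		nil,
+//	)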
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
new file mode 100644
index 0000000..2ed1285
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -0,0 +1,248 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels. Wrapping a nil value is valid, resulting
+// in a no-op Registerer.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics
+// exposed. See also
+// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+//
+// Conflicts between Collectors registered through the original Registerer and
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		labels:            labels,
+	}
+}
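+
+// A minimal usage sketch (illustrative only; the "worker" subsystem and metric
+// name are not part of this package): the wrapped Registerer attaches
+// worker="1" as a ConstLabel to every metric registered through it.
+//
+//	reg := prometheus.NewRegistry()
+//	wrapped := prometheus.WrapRegistererWith(prometheus.Labels{"worker": "1"}, reg)
+//	wrapped.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "worker_ops_total", // exported as worker_ops_total{worker="1"}
+//		Help: "Operations handled by this worker.",
+//	}))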
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+// Wrapping a nil value is valid, resulting in a no-op Registerer.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with "go_" or "process_",
+// respectively.)
+//
+// Conflicts between Collectors registered through the original Registerer and
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		prefix:            prefix,
+	}
+}
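+
+// A minimal usage sketch (the "api_" prefix and metric name are illustrative
+// only): metrics of a sub-system registered through the wrapping Registerer
+// are exposed with the prefix prepended.
+//
+//	sub := prometheus.WrapRegistererWithPrefix("api_", prometheus.DefaultRegisterer)
+//	sub.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "requests_total", // exposed as api_requests_total
+//		Help: "Requests handled by the API sub-system.",
+//	}))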
+
+// WrapCollectorWith returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapCollectorWith can be useful to work with multiple instances of a third
+// party library that does not expose enough flexibility on the lifecycle of its
+// registered metrics.
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can unregister the wrapped collector, effectively unregistering the
+// metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		labels:           labels,
+	}
+}
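+
+// A sketch of the pattern described above, keeping the hypothetical foo.New
+// constructor from the comment and assuming, as that comment implies, that a
+// *Registry can be passed where a Collector is expected:
+//
+//	fooReg := prometheus.NewRegistry()
+//	foo.New(fooReg) // hypothetical constructor that registers metrics on fooReg
+//	wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance": "a"}, fooReg)
+//	mainReg := prometheus.NewRegistry()
+//	mainReg.MustRegister(wrapped)
+//	// ... later, when this foo instance goes away:
+//	mainReg.Unregister(wrapped)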
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		prefix:           prefix,
+	}
+}
+
+type wrappingRegisterer struct {
+	wrappedRegisterer Registerer
+	prefix            string
+	labels            Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+	if r.wrappedRegisterer == nil {
+		return nil
+	}
+	return r.wrappedRegisterer.Register(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+	if r.wrappedRegisterer == nil {
+		return
+	}
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+	if r.wrappedRegisterer == nil {
+		return false
+	}
+	return r.wrappedRegisterer.Unregister(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+type wrappingCollector struct {
+	wrappedCollector Collector
+	prefix           string
+	labels           Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+	wrappedCh := make(chan Metric)
+	go func() {
+		c.wrappedCollector.Collect(wrappedCh)
+		close(wrappedCh)
+	}()
+	for m := range wrappedCh {
+		ch <- &wrappingMetric{
+			wrappedMetric: m,
+			prefix:        c.prefix,
+			labels:        c.labels,
+		}
+	}
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+	wrappedCh := make(chan *Desc)
+	go func() {
+		c.wrappedCollector.Describe(wrappedCh)
+		close(wrappedCh)
+	}()
+	for desc := range wrappedCh {
+		ch <- wrapDesc(desc, c.prefix, c.labels)
+	}
+}
+
+func (c *wrappingCollector) unwrapRecursively() Collector {
+	switch wc := c.wrappedCollector.(type) {
+	case *wrappingCollector:
+		return wc.unwrapRecursively()
+	default:
+		return wc
+	}
+}
+
+type wrappingMetric struct {
+	wrappedMetric Metric
+	prefix        string
+	labels        Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+	if err := m.wrappedMetric.Write(out); err != nil {
+		return err
+	}
+	if len(m.labels) == 0 {
+		// No wrapping labels.
+		return nil
+	}
+	for ln, lv := range m.labels {
+		out.Label = append(out.Label, &dto.LabelPair{
+			Name:  proto.String(ln),
+			Value: proto.String(lv),
+		})
+	}
+	sort.Sort(internal.LabelPairSorter(out.Label))
+	return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+	constLabels := Labels{}
+	for _, lp := range desc.constLabelPairs {
+		constLabels[*lp.Name] = *lp.Value
+	}
+	for ln, lv := range labels {
+		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+			return &Desc{
+				fqName:          desc.fqName,
+				help:            desc.help,
+				variableLabels:  desc.variableLabels,
+				constLabelPairs: desc.constLabelPairs,
+				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+			}
+		}
+		constLabels[ln] = lv
+	}
+	// NewDesc will do remaining validations.
+	newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	// Propagate errors if there were any. This will override any error
+	// created by NewDesc above, i.e. earlier errors get precedence.
+	if desc.err != nil {
+		newDesc.err = desc.err
+	}
+	return newDesc
+}
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 0000000..20110e4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000..2f15490
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,1399 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.30.0
+// 	protoc        v3.20.3
+// source: io/prometheus/client/metrics.proto
+
+package io_prometheus_client
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MetricType int32
+
+const (
+	// COUNTER must use the Metric field "counter".
+	MetricType_COUNTER MetricType = 0
+	// GAUGE must use the Metric field "gauge".
+	MetricType_GAUGE MetricType = 1
+	// SUMMARY must use the Metric field "summary".
+	MetricType_SUMMARY MetricType = 2
+	// UNTYPED must use the Metric field "untyped".
+	MetricType_UNTYPED MetricType = 3
+	// HISTOGRAM must use the Metric field "histogram".
+	MetricType_HISTOGRAM MetricType = 4
+	// GAUGE_HISTOGRAM must use the Metric field "histogram".
+	MetricType_GAUGE_HISTOGRAM MetricType = 5
+)
+
+// Enum value maps for MetricType.
+var (
+	MetricType_name = map[int32]string{
+		0: "COUNTER",
+		1: "GAUGE",
+		2: "SUMMARY",
+		3: "UNTYPED",
+		4: "HISTOGRAM",
+		5: "GAUGE_HISTOGRAM",
+	}
+	MetricType_value = map[string]int32{
+		"COUNTER":         0,
+		"GAUGE":           1,
+		"SUMMARY":         2,
+		"UNTYPED":         3,
+		"HISTOGRAM":       4,
+		"GAUGE_HISTOGRAM": 5,
+	}
+)
+
+func (x MetricType) Enum() *MetricType {
+	p := new(MetricType)
+	*p = x
+	return p
+}
+
+func (x MetricType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+	return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+	return &file_io_prometheus_client_metrics_proto_enumTypes[0]
+}
+
+func (x MetricType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = MetricType(num)
+	return nil
+}
+
+// Deprecated: Use MetricType.Descriptor instead.
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+type LabelPair struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name  *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *LabelPair) Reset() {
+	*x = LabelPair{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LabelPair) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
+func (*LabelPair) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *LabelPair) GetName() string {
+	if x != nil && x.Name != nil {
+		return *x.Name
+	}
+	return ""
+}
+
+func (x *LabelPair) GetValue() string {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return ""
+}
+
+type Gauge struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Gauge) Reset() {
+	*x = Gauge{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Gauge) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Gauge) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+type Counter struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value            *float64               `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	Exemplar         *Exemplar              `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+}
+
+func (x *Counter) Reset() {
+	*x = Counter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Counter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
+func (*Counter) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Counter) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+func (x *Counter) GetExemplar() *Exemplar {
+	if x != nil {
+		return x.Exemplar
+	}
+	return nil
+}
+
+func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
+	}
+	return nil
+}
+
+type Quantile struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+	Value    *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Quantile) Reset() {
+	*x = Quantile{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Quantile) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
+func (*Quantile) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Quantile) GetQuantile() float64 {
+	if x != nil && x.Quantile != nil {
+		return *x.Quantile
+	}
+	return 0
+}
+
+func (x *Quantile) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+type Summary struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	SampleCount      *uint64                `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleSum        *float64               `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	Quantile         []*Quantile            `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+}
+
+func (x *Summary) Reset() {
+	*x = Summary{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Summary) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Summary) GetSampleCount() uint64 {
+	if x != nil && x.SampleCount != nil {
+		return *x.SampleCount
+	}
+	return 0
+}
+
+func (x *Summary) GetSampleSum() float64 {
+	if x != nil && x.SampleSum != nil {
+		return *x.SampleSum
+	}
+	return 0
+}
+
+func (x *Summary) GetQuantile() []*Quantile {
+	if x != nil {
+		return x.Quantile
+	}
+	return nil
+}
+
+func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
+	}
+	return nil
+}
+
+type Untyped struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Untyped) Reset() {
+	*x = Untyped{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Untyped) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
+func (*Untyped) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Untyped) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+type Histogram struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	SampleCount      *uint64  `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
+	SampleSum        *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	// Buckets for the conventional histogram.
+	Bucket           []*Bucket              `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+	// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+	// then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+	// In the future, more bucket schemas may be added using numbers < -4 or > 8.
+	Schema         *int32   `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
+	ZeroThreshold  *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`      // Breadth of the zero bucket.
+	ZeroCount      *uint64  `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`                   // Count in zero bucket.
+	ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0.
+	// Negative buckets for the native histogram.
+	NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
+	// Use either "negative_delta" or "negative_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	NegativeDelta []int64   `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+	NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`  // Absolute count of each bucket.
+	// Positive buckets for the native histogram.
+	// Use a no-op span (offset 0, length 0) for a native histogram without any
+	// observations yet and with a zero_threshold of 0. Otherwise, it would be
+	// indistinguishable from a classic histogram.
+	PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
+	// Use either "positive_delta" or "positive_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	PositiveDelta []int64   `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+	PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`  // Absolute count of each bucket.
+	// Only used for native histograms. These exemplars MUST have a timestamp.
+	Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
+}
+
+func (x *Histogram) Reset() {
+	*x = Histogram{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Histogram) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Histogram) GetSampleCount() uint64 {
+	if x != nil && x.SampleCount != nil {
+		return *x.SampleCount
+	}
+	return 0
+}
+
+func (x *Histogram) GetSampleCountFloat() float64 {
+	if x != nil && x.SampleCountFloat != nil {
+		return *x.SampleCountFloat
+	}
+	return 0
+}
+
+func (x *Histogram) GetSampleSum() float64 {
+	if x != nil && x.SampleSum != nil {
+		return *x.SampleSum
+	}
+	return 0
+}
+
+func (x *Histogram) GetBucket() []*Bucket {
+	if x != nil {
+		return x.Bucket
+	}
+	return nil
+}
+
+func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
+	}
+	return nil
+}
+
+func (x *Histogram) GetSchema() int32 {
+	if x != nil && x.Schema != nil {
+		return *x.Schema
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroThreshold() float64 {
+	if x != nil && x.ZeroThreshold != nil {
+		return *x.ZeroThreshold
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroCount() uint64 {
+	if x != nil && x.ZeroCount != nil {
+		return *x.ZeroCount
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroCountFloat() float64 {
+	if x != nil && x.ZeroCountFloat != nil {
+		return *x.ZeroCountFloat
+	}
+	return 0
+}
+
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+	if x != nil {
+		return x.NegativeSpan
+	}
+	return nil
+}
+
+func (x *Histogram) GetNegativeDelta() []int64 {
+	if x != nil {
+		return x.NegativeDelta
+	}
+	return nil
+}
+
+func (x *Histogram) GetNegativeCount() []float64 {
+	if x != nil {
+		return x.NegativeCount
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+	if x != nil {
+		return x.PositiveSpan
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveDelta() []int64 {
+	if x != nil {
+		return x.PositiveDelta
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveCount() []float64 {
+	if x != nil {
+		return x.PositiveCount
+	}
+	return nil
+}
+
+func (x *Histogram) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
+// A Bucket of a conventional histogram, each of which is treated as
+// an individual counter-like time series by Prometheus.
+type Bucket struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`                   // Cumulative in increasing order.
+	CumulativeCountFloat *float64  `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`                                 // Inclusive.
+	Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
+}
+
+func (x *Bucket) Reset() {
+	*x = Bucket{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Bucket) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
+func (*Bucket) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Bucket) GetCumulativeCount() uint64 {
+	if x != nil && x.CumulativeCount != nil {
+		return *x.CumulativeCount
+	}
+	return 0
+}
+
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+	if x != nil && x.CumulativeCountFloat != nil {
+		return *x.CumulativeCountFloat
+	}
+	return 0
+}
+
+func (x *Bucket) GetUpperBound() float64 {
+	if x != nil && x.UpperBound != nil {
+		return *x.UpperBound
+	}
+	return 0
+}
+
+func (x *Bucket) GetExemplar() *Exemplar {
+	if x != nil {
+		return x.Exemplar
+	}
+	return nil
+}
+
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+type BucketSpan struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Offset *int32  `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+	Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`   // Length of consecutive buckets.
+}
+
+func (x *BucketSpan) Reset() {
+	*x = BucketSpan{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BucketSpan) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BucketSpan) GetOffset() int32 {
+	if x != nil && x.Offset != nil {
+		return *x.Offset
+	}
+	return 0
+}
+
+func (x *BucketSpan) GetLength() uint32 {
+	if x != nil && x.Length != nil {
+		return *x.Length
+	}
+	return 0
+}
+
+type Exemplar struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Label     []*LabelPair           `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Value     *float64               `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+	Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
+}
+
+func (x *Exemplar) Reset() {
+	*x = Exemplar{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Exemplar) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Exemplar) GetLabel() []*LabelPair {
+	if x != nil {
+		return x.Label
+	}
+	return nil
+}
+
+func (x *Exemplar) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
+	}
+	return nil
+}
+
+type Metric struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Label       []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Gauge       *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+	Counter     *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+	Summary     *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+	Untyped     *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+	Histogram   *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+	TimestampMs *int64       `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+}
+
+func (x *Metric) Reset() {
+	*x = Metric{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Metric) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *Metric) GetLabel() []*LabelPair {
+	if x != nil {
+		return x.Label
+	}
+	return nil
+}
+
+func (x *Metric) GetGauge() *Gauge {
+	if x != nil {
+		return x.Gauge
+	}
+	return nil
+}
+
+func (x *Metric) GetCounter() *Counter {
+	if x != nil {
+		return x.Counter
+	}
+	return nil
+}
+
+func (x *Metric) GetSummary() *Summary {
+	if x != nil {
+		return x.Summary
+	}
+	return nil
+}
+
+func (x *Metric) GetUntyped() *Untyped {
+	if x != nil {
+		return x.Untyped
+	}
+	return nil
+}
+
+func (x *Metric) GetHistogram() *Histogram {
+	if x != nil {
+		return x.Histogram
+	}
+	return nil
+}
+
+func (x *Metric) GetTimestampMs() int64 {
+	if x != nil && x.TimestampMs != nil {
+		return *x.TimestampMs
+	}
+	return 0
+}
+
+type MetricFamily struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name   *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Help   *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+	Type   *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+	Metric []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+	Unit   *string     `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
+}
+
+func (x *MetricFamily) Reset() {
+	*x = MetricFamily{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MetricFamily) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *MetricFamily) GetName() string {
+	if x != nil && x.Name != nil {
+		return *x.Name
+	}
+	return ""
+}
+
+func (x *MetricFamily) GetHelp() string {
+	if x != nil && x.Help != nil {
+		return *x.Help
+	}
+	return ""
+}
+
+func (x *MetricFamily) GetType() MetricType {
+	if x != nil && x.Type != nil {
+		return *x.Type
+	}
+	return MetricType_COUNTER
+}
+
+func (x *MetricFamily) GetMetric() []*Metric {
+	if x != nil {
+		return x.Metric
+	}
+	return nil
+}
+
+func (x *MetricFamily) GetUnit() string {
+	if x != nil && x.Unit != nil {
+		return *x.Unit
+	}
+	return ""
+}
+
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
+
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+	0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+	0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+	0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
+	0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
+	0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
+	0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+	0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
+	0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+	0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+	0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+	0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
+	0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
+	0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+	0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
+	0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
+	0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+	0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+	0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
+	0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
+	0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
+	0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
+	0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
+	0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+	0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+	0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
+	0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+	0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+	0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+	0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
+	0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
+	0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
+	0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
+	0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
+	0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+	0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+	0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+	0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
+	0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
+	0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+	0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+	0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
+	0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+	0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+	0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
+	0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+	0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
+	0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
+	0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
+	0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+	0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+	0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+	0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+	0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+	0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+	0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+	0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+	0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+	0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+	0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+	0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+	0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+	0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+	0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+	0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+	0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+	0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+	0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+	0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+	0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+	0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+	0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+	0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
+	0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
+	0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+	0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
+	0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+	0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+	0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+	0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+	0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+	0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+	0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+	0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+	0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+	0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+	0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+	0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+	0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+	0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+	0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+	0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+	0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+	0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+	0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+	0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+	0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+	0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+	0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+	0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+	0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+	0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+	0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+	0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+	0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+	0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+	0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+	0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+	0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+	file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+	file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+	file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+		file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+	})
+	return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+	(MetricType)(0),               // 0: io.prometheus.client.MetricType
+	(*LabelPair)(nil),             // 1: io.prometheus.client.LabelPair
+	(*Gauge)(nil),                 // 2: io.prometheus.client.Gauge
+	(*Counter)(nil),               // 3: io.prometheus.client.Counter
+	(*Quantile)(nil),              // 4: io.prometheus.client.Quantile
+	(*Summary)(nil),               // 5: io.prometheus.client.Summary
+	(*Untyped)(nil),               // 6: io.prometheus.client.Untyped
+	(*Histogram)(nil),             // 7: io.prometheus.client.Histogram
+	(*Bucket)(nil),                // 8: io.prometheus.client.Bucket
+	(*BucketSpan)(nil),            // 9: io.prometheus.client.BucketSpan
+	(*Exemplar)(nil),              // 10: io.prometheus.client.Exemplar
+	(*Metric)(nil),                // 11: io.prometheus.client.Metric
+	(*MetricFamily)(nil),          // 12: io.prometheus.client.MetricFamily
+	(*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+	10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+	13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
+	4,  // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+	13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
+	8,  // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+	13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
+	9,  // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+	9,  // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+	10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+	10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+	1,  // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+	13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+	1,  // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+	2,  // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+	3,  // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+	5,  // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+	6,  // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+	7,  // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+	0,  // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+	11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+	20, // [20:20] is the sub-list for method output_type
+	20, // [20:20] is the sub-list for method input_type
+	20, // [20:20] is the sub-list for extension type_name
+	20, // [20:20] is the sub-list for extension extendee
+	0,  // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+	if File_io_prometheus_client_metrics_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LabelPair); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Gauge); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Counter); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Quantile); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Summary); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Untyped); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Histogram); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Bucket); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BucketSpan); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Exemplar); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Metric); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MetricFamily); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   12,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_io_prometheus_client_metrics_proto_goTypes,
+		DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+		EnumInfos:         file_io_prometheus_client_metrics_proto_enumTypes,
+		MessageInfos:      file_io_prometheus_client_metrics_proto_msgTypes,
+	}.Build()
+	File_io_prometheus_client_metrics_proto = out.File
+	file_io_prometheus_client_metrics_proto_rawDesc = nil
+	file_io_prometheus_client_metrics_proto_goTypes = nil
+	file_io_prometheus_client_metrics_proto_depIdxs = nil
+}
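
Note (not part of the vendored file): the generated types above expose every scalar field through a pointer, so values are normally set with the proto helper constructors. A minimal sketch of building a MetricFamily by hand follows; the metric and label names are hypothetical and chosen only for illustration.

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Build a counter family against the generated dto types; optional proto
	// fields take pointers, hence proto.String / proto.Float64 / Enum().
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests handled."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(1027)},
		}},
	}
	// The generated getters are nil-safe, mirroring the accessors defined above.
	fmt.Println(mf.GetName(), mf.GetType(), mf.GetMetric()[0].GetCounter().GetValue())
}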
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 0000000..636a2c1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000..7b76237
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,456 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"math"
+	"mime"
+	"net/http"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/encoding/protodelim"
+
+	"github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+	Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+	// Timestamp is added to each value from the stream that has no explicit timestamp set.
+	Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+	ct := h.Get(hdrContentType)
+
+	mediatype, params, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return FmtUnknown
+	}
+
+	const textType = "text/plain"
+
+	switch mediatype {
+	case ProtoType:
+		if p, ok := params["proto"]; ok && p != ProtoProtocol {
+			return FmtUnknown
+		}
+		if e, ok := params["encoding"]; ok && e != "delimited" {
+			return FmtUnknown
+		}
+		return FmtProtoDelim
+
+	case textType:
+		if v, ok := params["version"]; ok && v != TextVersion {
+			return FmtUnknown
+		}
+		return FmtText
+	}
+
+	return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
+func NewDecoder(r io.Reader, format Format) Decoder {
+	scheme := model.LegacyValidation
+	if format.ToEscapingScheme() == model.NoEscaping {
+		scheme = model.UTF8Validation
+	}
+	switch format.FormatType() {
+	case TypeProtoDelim:
+		return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+	case TypeProtoText, TypeProtoCompact:
+		return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
+	}
+	return &textDecoder{r: r, s: scheme}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+	r protodelim.Reader
+	s model.ValidationScheme
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+	opts := protodelim.UnmarshalOptions{
+		MaxSize: -1,
+	}
+	if err := opts.UnmarshalFrom(d.r, v); err != nil {
+		return err
+	}
+	if !d.s.IsValidMetricName(v.GetName()) {
+		return fmt.Errorf("invalid metric name %q", v.GetName())
+	}
+	for _, m := range v.GetMetric() {
+		if m == nil {
+			continue
+		}
+		for _, l := range m.GetLabel() {
+			if l == nil {
+				continue
+			}
+			if !model.LabelValue(l.GetValue()).IsValid() {
+				return fmt.Errorf("invalid label value %q", l.GetValue())
+			}
+			if !d.s.IsValidLabelName(l.GetName()) {
+				return fmt.Errorf("invalid label name %q", l.GetName())
+			}
+		}
+	}
+	return nil
+}
+
+// errDecoder is an error-state decoder that always returns the same error.
+type errDecoder struct {
+	err error
+}
+
+func (d *errDecoder) Decode(*dto.MetricFamily) error {
+	return d.err
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+	r    io.Reader
+	fams map[string]*dto.MetricFamily
+	s    model.ValidationScheme
+	err  error
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+	if d.err == nil {
+		// Read all metrics in one shot.
+		p := NewTextParser(d.s)
+		d.fams, d.err = p.TextToMetricFamilies(d.r)
+		// If we don't get an error, store io.EOF for the end.
+		if d.err == nil {
+			d.err = io.EOF
+		}
+	}
+	// Pick off one MetricFamily per Decode until there's nothing left.
+	for key, fam := range d.fams {
+		v.Name = fam.Name
+		v.Help = fam.Help
+		v.Type = fam.Type
+		v.Metric = fam.Metric
+		delete(d.fams, key)
+		return nil
+	}
+	return d.err
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+	Dec  Decoder
+	Opts *DecodeOptions
+
+	f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+	err := sd.Dec.Decode(&sd.f)
+	if err != nil {
+		return err
+	}
+	*s, err = extractSamples(&sd.f, sd.Opts)
+	return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+	var (
+		all     model.Vector
+		lastErr error
+	)
+	for _, f := range fams {
+		some, err := extractSamples(f, o)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		all = append(all, some...)
+	}
+	return all, lastErr
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+	switch f.GetType() {
+	case dto.MetricType_COUNTER:
+		return extractCounter(o, f), nil
+	case dto.MetricType_GAUGE:
+		return extractGauge(o, f), nil
+	case dto.MetricType_SUMMARY:
+		return extractSummary(o, f), nil
+	case dto.MetricType_UNTYPED:
+		return extractUntyped(o, f), nil
+	case dto.MetricType_HISTOGRAM:
+		return extractHistogram(o, f), nil
+	}
+	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Counter == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Counter.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Gauge == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Gauge.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Untyped == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Untyped.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Summary == nil {
+			continue
+		}
+
+		timestamp := o.Timestamp
+		if m.TimestampMs != nil {
+			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		}
+
+		for _, q := range m.Summary.Quantile {
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			// BUG(matt): Update other names to "quantile".
+			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     model.SampleValue(q.GetValue()),
+				Timestamp: timestamp,
+			})
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Summary.GetSampleSum()),
+			Timestamp: timestamp,
+		})
+
+		lset = make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Summary.GetSampleCount()),
+			Timestamp: timestamp,
+		})
+	}
+
+	return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Histogram == nil {
+			continue
+		}
+
+		timestamp := o.Timestamp
+		if m.TimestampMs != nil {
+			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		}
+
+		infSeen := false
+
+		for _, q := range m.Histogram.Bucket {
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+			if math.IsInf(q.GetUpperBound(), +1) {
+				infSeen = true
+			}
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     model.SampleValue(q.GetCumulativeCount()),
+				Timestamp: timestamp,
+			})
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Histogram.GetSampleSum()),
+			Timestamp: timestamp,
+		})
+
+		lset = make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+		count := &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Histogram.GetSampleCount()),
+			Timestamp: timestamp,
+		}
+		samples = append(samples, count)
+
+		if !infSeen {
+			// Append an infinity bucket sample.
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     count.Value,
+				Timestamp: timestamp,
+			})
+		}
+	}
+
+	return samples
+}
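
Note (not part of the vendored file): a minimal sketch of the decode path defined above — NewDecoder plus ExtractSamples — assuming the NewFormat/TypeTextPlain helpers referenced in the deprecation comments; the scrape payload is hypothetical.

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical scrape body in the classic Prometheus text format.
	payload := "# TYPE http_requests_total counter\n" +
		"http_requests_total{code=\"200\"} 1027\n"

	dec := expfmt.NewDecoder(strings.NewReader(payload), expfmt.NewFormat(expfmt.TypeTextPlain))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err == io.EOF {
				break // textDecoder hands back io.EOF once all families are consumed
			}
			log.Fatal(err)
		}
		// Flatten the family into samples, stamping values with no explicit timestamp.
		samples, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{Timestamp: model.Now()}, &mf)
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range samples {
			fmt.Println(s)
		}
	}
}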
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000..73c24df
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,196 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/munnerz/goautoneg"
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/encoding/protodelim"
+	"google.golang.org/protobuf/encoding/prototext"
+
+	"github.com/prometheus/common/model"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+	Encode(*dto.MetricFamily) error
+}
+
+// Closer is implemented by Encoders that need to be closed to finalize
+// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
+//
+// Note that all Encoder implementations returned from this package implement
+// Closer, too, even if the Close call is a no-op. This happens in preparation
+// for adding a Close method to the Encoder interface directly in a (mildly
+// breaking) release in the future.
+type Closer interface {
+	Close() error
+}
+
+type encoderCloser struct {
+	encode func(*dto.MetricFamily) error
+	close  func() error
+}
+
+func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
+	return ec.encode(v)
+}
+
+func (ec encoderCloser) Close() error {
+	return ec.close()
+}
+
+// Negotiate returns the Content-Type based on the given Accept header. If no
+// appropriate accepted type is found, FmtText is returned (which is the
+// Prometheus text format). This function will never negotiate FmtOpenMetrics,
+// as the support is still experimental. To include the option to negotiate
+// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
+func Negotiate(h http.Header) Format {
+	escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+			switch Format(escapeParam) {
+			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+				escapingScheme = Format("; escaping=" + escapeParam)
+			default:
+				// If the escaping parameter is unknown, ignore it.
+			}
+		}
+		ver := ac.Params["version"]
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+			switch ac.Params["encoding"] {
+			case "delimited":
+				return FmtProtoDelim + escapingScheme
+			case "text":
+				return FmtProtoText + escapingScheme
+			case "compact-text":
+				return FmtProtoCompact + escapingScheme
+			}
+		}
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+			return FmtText + escapingScheme
+		}
+	}
+	return FmtText + escapingScheme
+}
+
+// NegotiateIncludingOpenMetrics works like Negotiate but includes
+// FmtOpenMetrics as an option for the result. Note that this function is
+// temporary and will disappear once FmtOpenMetrics is fully supported and as
+// such may be negotiated by the normal Negotiate function.
+func NegotiateIncludingOpenMetrics(h http.Header) Format {
+	escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+			switch Format(escapeParam) {
+			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+				escapingScheme = Format("; escaping=" + escapeParam)
+			default:
+				// If the escaping parameter is unknown, ignore it.
+			}
+		}
+		ver := ac.Params["version"]
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+			switch ac.Params["encoding"] {
+			case "delimited":
+				return FmtProtoDelim + escapingScheme
+			case "text":
+				return FmtProtoText + escapingScheme
+			case "compact-text":
+				return FmtProtoCompact + escapingScheme
+			}
+		}
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+			return FmtText + escapingScheme
+		}
+		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+			switch ver {
+			case OpenMetricsVersion_1_0_0:
+				return FmtOpenMetrics_1_0_0 + escapingScheme
+			default:
+				return FmtOpenMetrics_0_0_1 + escapingScheme
+			}
+		}
+	}
+	return FmtText + escapingScheme
+}
+
+// NewEncoder returns a new encoder based on content type negotiation. All
+// Encoder implementations returned by NewEncoder also implement Closer, and
+// callers should always call the Close method. It is currently only required
+// for FmtOpenMetrics, but a future (breaking) release will add the Close method
+// to the Encoder interface directly. The current version of the Encoder
+// interface is kept for backwards compatibility.
+// In cases where the Format does not allow for UTF-8 names, the global
+// NameEscapingScheme will be applied.
+//
+// NewEncoder can be called with additional options to customize the OpenMetrics text output.
+// For example:
+// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
+//
+// Extra options are ignored for all other formats.
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
+	escapingScheme := format.ToEscapingScheme()
+
+	switch format.FormatType() {
+	case TypeProtoDelim:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme))
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case TypeProtoCompact:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case TypeProtoText:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case TypeTextPlain:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case TypeOpenMetrics:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
+				return err
+			},
+			close: func() error {
+				_, err := FinalizeOpenMetrics(w)
+				return err
+			},
+		}
+	}
+	panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
+}
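
Note (not part of the vendored file): a minimal sketch of the encode path above, wiring Negotiate and NewEncoder into an HTTP handler. The metric family, handler path, and port are illustrative only; the Closer type assertion follows the package comment about formats that need a trailer.

package main

import (
	"log"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"

	"github.com/prometheus/common/expfmt"
)

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	// Pick a wire format from the scraper's Accept header and echo it back.
	format := expfmt.Negotiate(r.Header)
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format)
	mf := &dto.MetricFamily{
		Name: proto.String("build_info"),
		Help: proto.String("Build information."),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Gauge: &dto.Gauge{Value: proto.Float64(1)},
		}},
	}
	if err := enc.Encode(mf); err != nil {
		log.Println("encode:", err)
		return
	}
	// Close finalizes formats that require a trailer (e.g. OpenMetrics' "# EOF").
	if closer, ok := enc.(expfmt.Closer); ok {
		if err := closer.Close(); err != nil {
			log.Println("close:", err)
		}
	}
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	log.Fatal(http.ListenAndServe(":9100", nil))
}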
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000..c34c7de
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,211 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+import (
+	"errors"
+	"strings"
+
+	"github.com/prometheus/common/model"
+)
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+// Constants to assemble the Content-Type values for the different wire
+// protocols. The Content-Type strings here are all for the legacy exposition
+// formats, where valid characters for metric names and label names are limited.
+// Support for arbitrary UTF-8 characters in those names is already partially
+// implemented in this module (see model.ValidationScheme), but to actually use
+// it on the wire, new content-type strings will have to be agreed upon and
+// added here.
+const (
+	TextVersion   = "0.0.4"
+	ProtoType     = `application/vnd.google.protobuf`
+	ProtoProtocol = `io.prometheus.client.MetricFamily`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	ProtoFmt        = ProtoType + "; proto=" + ProtoProtocol + ";"
+	OpenMetricsType = `application/openmetrics-text`
+	//nolint:revive // Allow for underscores.
+	OpenMetricsVersion_0_0_1 = "0.0.1"
+	//nolint:revive // Allow for underscores.
+	OpenMetricsVersion_1_0_0 = "1.0.0"
+
+	// The Content-Type values for the different wire protocols. Do not do direct
+	// comparisons to these constants; instead, use the comparison functions.
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+	FmtUnknown Format = `<unknown>`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+	FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+	FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+	FmtProtoText Format = ProtoFmt + ` encoding=text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	//nolint:revive // Allow for underscores.
+	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	//nolint:revive // Allow for underscores.
+	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+)
+
+const (
+	hdrContentType = "Content-Type"
+	hdrAccept      = "Accept"
+)
+
+// FormatType is a Go enum representing the overall category for the given
+// Format. As the number of Format permutations increases, doing basic string
+// comparisons is not feasible, so this enum captures the most useful
+// high-level attribute of the Format string.
+type FormatType int
+
+const (
+	TypeUnknown FormatType = iota
+	TypeProtoCompact
+	TypeProtoDelim
+	TypeProtoText
+	TypeTextPlain
+	TypeOpenMetrics
+)
+
+// NewFormat generates a new Format from the type provided. It is mostly used
+// in tests; in normal operation, Formats should be generated as part of content
+// negotiation in encode.go. If a type has more than one version, the latest
+// version will be returned.
+func NewFormat(t FormatType) Format {
+	switch t {
+	case TypeProtoCompact:
+		return FmtProtoCompact
+	case TypeProtoDelim:
+		return FmtProtoDelim
+	case TypeProtoText:
+		return FmtProtoText
+	case TypeTextPlain:
+		return FmtText
+	case TypeOpenMetrics:
+		return FmtOpenMetrics_1_0_0
+	default:
+		return FmtUnknown
+	}
+}
+
+// NewOpenMetricsFormat generates a new OpenMetrics format matching the
+// specified version number.
+func NewOpenMetricsFormat(version string) (Format, error) {
+	if version == OpenMetricsVersion_0_0_1 {
+		return FmtOpenMetrics_0_0_1, nil
+	}
+	if version == OpenMetricsVersion_1_0_0 {
+		return FmtOpenMetrics_1_0_0, nil
+	}
+	return FmtUnknown, errors.New("unknown open metrics version string")
+}
+
+// WithEscapingScheme returns a copy of Format with the specified escaping
+// scheme appended to the end. If an escaping scheme already exists it is
+// removed.
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
+	var terms []string
+	for _, p := range strings.Split(string(f), ";") {
+		toks := strings.Split(p, "=")
+		if len(toks) != 2 {
+			trimmed := strings.TrimSpace(p)
+			if len(trimmed) > 0 {
+				terms = append(terms, trimmed)
+			}
+			continue
+		}
+		key := strings.TrimSpace(toks[0])
+		if key != model.EscapingKey {
+			terms = append(terms, strings.TrimSpace(p))
+		}
+	}
+	terms = append(terms, model.EscapingKey+"="+s.String())
+	return Format(strings.Join(terms, "; "))
+}
+
+// FormatType deduces an overall FormatType for the given format.
+func (f Format) FormatType() FormatType {
+	toks := strings.Split(string(f), ";")
+	params := make(map[string]string)
+	for i, t := range toks {
+		if i == 0 {
+			continue
+		}
+		args := strings.Split(t, "=")
+		if len(args) != 2 {
+			continue
+		}
+		params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
+	}
+
+	switch strings.TrimSpace(toks[0]) {
+	case ProtoType:
+		if params["proto"] != ProtoProtocol {
+			return TypeUnknown
+		}
+		switch params["encoding"] {
+		case "delimited":
+			return TypeProtoDelim
+		case "text":
+			return TypeProtoText
+		case "compact-text":
+			return TypeProtoCompact
+		default:
+			return TypeUnknown
+		}
+	case OpenMetricsType:
+		if params["charset"] != "utf-8" {
+			return TypeUnknown
+		}
+		return TypeOpenMetrics
+	case "text/plain":
+		v, ok := params["version"]
+		if !ok {
+			return TypeTextPlain
+		}
+		if v == TextVersion {
+			return TypeTextPlain
+		}
+		return TypeUnknown
+	default:
+		return TypeUnknown
+	}
+}
+
+// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
+// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
+// "escaping" term exists, that will be used. Otherwise, the global default will
+// be returned.
+func (f Format) ToEscapingScheme() model.EscapingScheme {
+	for _, p := range strings.Split(string(f), ";") {
+		toks := strings.Split(p, "=")
+		if len(toks) != 2 {
+			continue
+		}
+		key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
+		if key == model.EscapingKey {
+			scheme, err := model.ToEscapingScheme(value)
+			if err != nil {
+				return model.NameEscapingScheme
+			}
+			return scheme
+		}
+	}
+	return model.NameEscapingScheme
+}
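
A small, illustrative sketch of how the Format helpers above are typically applied to a scraped Content-Type header (the content-type string here is an example, not taken from the vendored code):

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Content-Type as it might appear on a scrape response; illustrative only.
	ct := "application/openmetrics-text; version=1.0.0; charset=utf-8"
	format := expfmt.Format(ct)
	switch format.FormatType() {
	case expfmt.TypeOpenMetrics:
		fmt.Println("decode as OpenMetrics")
	case expfmt.TypeTextPlain:
		fmt.Println("decode as the classic text format")
	default:
		fmt.Println("unsupported content type")
	}
	// With no "escaping" term present, the global default scheme is used.
	fmt.Println(format.ToEscapingScheme())
}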
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000..0290f6a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,40 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+//go:build gofuzz
+// +build gofuzz
+
+package expfmt
+
+import (
+	"bytes"
+
+	"github.com/prometheus/common/model"
+)
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//	go-fuzz-build github.com/prometheus/common/expfmt
+//	go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+	parser := NewTextParser(model.UTF8Validation)
+	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
new file mode 100644
index 0000000..8dbf6d0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -0,0 +1,695 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	"github.com/prometheus/common/model"
+)
+
+type encoderOption struct {
+	withCreatedLines bool
+	withUnit         bool
+}
+
+type EncoderOption func(*encoderOption)
+
+// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
+// to include _created lines (See
+// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
+// Created timestamps can improve the accuracy of series reset detection, but
+// come with a bandwidth cost.
+//
+// At the time of writing, created timestamp ingestion is still experimental in
+// Prometheus and needs to be enabled with the feature flag
+// `--enable-feature=created-timestamp-zero-ingestion`, and breaking changes are
+// still possible. Therefore, it is recommended to use this feature with caution.
+func WithCreatedLines() EncoderOption {
+	return func(t *encoderOption) {
+		t.withCreatedLines = true
+	}
+}
+
+// WithUnit is an EncoderOption that causes a declared unit to be written to the
+// output and appended to the metric name as a suffix, if it is not there already.
+// Without this option, the unit is neither added to the metric name nor written
+// to the output, even if it is declared in the *dto.MetricFamily struct, i.e.
+// even if in.Unit != nil.
+func WithUnit() EncoderOption {
+	return func(t *encoderOption) {
+		t.withUnit = true
+	}
+}
+
+// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
+// OpenMetrics text format and writes the resulting lines to 'out'. It returns
+// the number of bytes written and any error encountered. The output will have
+// the same order as the input; no further sorting is performed. Furthermore,
+// this function assumes the input is already sanitized and does not perform any
+// sanity checks. If the input contains duplicate metrics or invalid metric or
+// label names, the conversion will result in invalid text format output.
+//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// This function fulfills the type 'expfmt.encoder'.
+//
+// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
+// on individual metric families, it is the responsibility of the caller to
+// append this line to 'out' once all metric families have been written.
+// Conveniently, this can be done by calling FinalizeOpenMetrics.
+//
+// The output should be fully OpenMetrics compliant. However, there are a few
+// missing features and peculiarities to avoid complications when switching from
+// Prometheus to OpenMetrics or vice versa:
+//
+//   - Counters are expected to have the `_total` suffix in their metric name. In
+//     the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
+//     lines. A counter with a missing `_total` suffix is not an error. However,
+//     its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+//     output.
+//
+//   - According to the OM specs, the `# UNIT` line is optional, but if populated,
+//     the unit has to be present in the metric name as its suffix:
+//     (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
+//     However, to accommodate scenarios where such a change in the metric name is
+//     not desirable, users are given the choice to explicitly opt in, in case they
+//     want the unit to be included in the output AND appended to the metric name
+//     as a suffix (see the description of the WithUnit function above); if they do
+//     not opt in, neither happens.
+//
+//   - No support for the following (optional) features: info type,
+//     stateset type, gaugehistogram type.
+//
+//   - The size of exemplar labels is not checked (i.e. it's possible to create
+//     exemplars that are larger than allowed by the OpenMetrics specification).
+//
+//   - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+//     with a `NaN` value.)
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
+	toOM := encoderOption{}
+	for _, option := range options {
+		option(&toOM)
+	}
+
+	name := in.GetName()
+	if name == "" {
+		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Try the interface upgrade. If it doesn't work, we'll use a
+	// bufio.Writer from the sync.Pool.
+	w, ok := out.(enhancedWriter)
+	if !ok {
+		b := bufPool.Get().(*bufio.Writer)
+		b.Reset(out)
+		w = b
+		defer func() {
+			bErr := b.Flush()
+			if err == nil {
+				err = bErr
+			}
+			bufPool.Put(b)
+		}()
+	}
+
+	var (
+		n             int
+		metricType    = in.GetType()
+		compliantName = name
+	)
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
+		compliantName = name[:len(name)-6]
+	}
+	if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
+		compliantName = compliantName + "_" + *in.Unit
+	}
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err = w.WriteString("# HELP ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = writeName(w, compliantName)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Help, true)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+	n, err = w.WriteString("# TYPE ")
+	written += n
+	if err != nil {
+		return
+	}
+	n, err = writeName(w, compliantName)
+	written += n
+	if err != nil {
+		return
+	}
+	switch metricType {
+	case dto.MetricType_COUNTER:
+		if strings.HasSuffix(name, "_total") {
+			n, err = w.WriteString(" counter\n")
+		} else {
+			n, err = w.WriteString(" unknown\n")
+		}
+	case dto.MetricType_GAUGE:
+		n, err = w.WriteString(" gauge\n")
+	case dto.MetricType_SUMMARY:
+		n, err = w.WriteString(" summary\n")
+	case dto.MetricType_UNTYPED:
+		n, err = w.WriteString(" unknown\n")
+	case dto.MetricType_HISTOGRAM:
+		n, err = w.WriteString(" histogram\n")
+	default:
+		return written, fmt.Errorf("unknown metric type %s", metricType.String())
+	}
+	written += n
+	if err != nil {
+		return
+	}
+	if toOM.withUnit && in.Unit != nil {
+		n, err = w.WriteString("# UNIT ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = writeName(w, compliantName)
+		written += n
+		if err != nil {
+			return
+		}
+
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Unit, true)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+
+	var createdTsBytesWritten int
+
+	// Finally the samples, one line for each.
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+		compliantName += "_total"
+	}
+	for _, metric := range in.Metric {
+		switch metricType {
+		case dto.MetricType_COUNTER:
+			if metric.Counter == nil {
+				return written, fmt.Errorf(
+					"expected counter in metric %s %s", compliantName, metric,
+				)
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "", metric, "", 0,
+				metric.Counter.GetValue(), 0, false,
+				metric.Counter.Exemplar,
+			)
+			if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
+		case dto.MetricType_GAUGE:
+			if metric.Gauge == nil {
+				return written, fmt.Errorf(
+					"expected gauge in metric %s %s", compliantName, metric,
+				)
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "", metric, "", 0,
+				metric.Gauge.GetValue(), 0, false,
+				nil,
+			)
+		case dto.MetricType_UNTYPED:
+			if metric.Untyped == nil {
+				return written, fmt.Errorf(
+					"expected untyped in metric %s %s", compliantName, metric,
+				)
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "", metric, "", 0,
+				metric.Untyped.GetValue(), 0, false,
+				nil,
+			)
+		case dto.MetricType_SUMMARY:
+			if metric.Summary == nil {
+				return written, fmt.Errorf(
+					"expected summary in metric %s %s", compliantName, metric,
+				)
+			}
+			for _, q := range metric.Summary.Quantile {
+				n, err = writeOpenMetricsSample(
+					w, compliantName, "", metric,
+					model.QuantileLabel, q.GetQuantile(),
+					q.GetValue(), 0, false,
+					nil,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "_sum", metric, "", 0,
+				metric.Summary.GetSampleSum(), 0, false,
+				nil,
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "_count", metric, "", 0,
+				0, metric.Summary.GetSampleCount(), true,
+				nil,
+			)
+			if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
+		case dto.MetricType_HISTOGRAM:
+			if metric.Histogram == nil {
+				return written, fmt.Errorf(
+					"expected histogram in metric %s %s", compliantName, metric,
+				)
+			}
+			infSeen := false
+			for _, b := range metric.Histogram.Bucket {
+				n, err = writeOpenMetricsSample(
+					w, compliantName, "_bucket", metric,
+					model.BucketLabel, b.GetUpperBound(),
+					0, b.GetCumulativeCount(), true,
+					b.Exemplar,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+				if math.IsInf(b.GetUpperBound(), +1) {
+					infSeen = true
+				}
+			}
+			if !infSeen {
+				n, err = writeOpenMetricsSample(
+					w, compliantName, "_bucket", metric,
+					model.BucketLabel, math.Inf(+1),
+					0, metric.Histogram.GetSampleCount(), true,
+					nil,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "_sum", metric, "", 0,
+				metric.Histogram.GetSampleSum(), 0, false,
+				nil,
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "_count", metric, "", 0,
+				0, metric.Histogram.GetSampleCount(), true,
+				nil,
+			)
+			if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
+		default:
+			return written, fmt.Errorf(
+				"unexpected type in metric %s %s", compliantName, metric,
+			)
+		}
+		written += n
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
+func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
+	return w.Write([]byte("# EOF\n"))
+}
+
+// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
+// w, given the metric name, the metric proto message itself, optionally an
+// additional label name with a float64 value (use empty string as label name if
+// not required), the value (either as a float64 or a uint64, determined by
+// useIntValue), and optionally an exemplar (use nil if not required). The
+// function returns the number of bytes written and any error encountered.
+func writeOpenMetricsSample(
+	w enhancedWriter,
+	name, suffix string,
+	metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	floatValue float64, intValue uint64, useIntValue bool,
+	exemplar *dto.Exemplar,
+) (int, error) {
+	written := 0
+	n, err := writeOpenMetricsNameAndLabelPairs(
+		w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	if useIntValue {
+		n, err = writeUint(w, intValue)
+	} else {
+		n, err = writeOpenMetricsFloat(w, floatValue)
+	}
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if metric.TimestampMs != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		// TODO(beorn7): Format this directly without converting to a float first.
+		n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	if exemplar != nil && len(exemplar.Label) > 0 {
+		n, err = writeExemplar(w, exemplar)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeOpenMetricsNameAndLabelPairs works like writeNameAndLabelPairs but
+// formats the additional label value using the OpenMetrics float format.
+func writeOpenMetricsNameAndLabelPairs(
+	w enhancedWriter,
+	name string,
+	in []*dto.LabelPair,
+	additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+	var (
+		written            int
+		separator          byte = '{'
+		metricInsideBraces      = false
+	)
+
+	if name != "" {
+		// If the name does not pass the legacy validity check, we must put the
+		// metric name inside the braces, quoted.
+		if !model.LegacyValidation.IsValidMetricName(name) {
+			metricInsideBraces = true
+			err := w.WriteByte(separator)
+			written++
+			if err != nil {
+				return written, err
+			}
+			separator = ','
+		}
+
+		n, err := writeName(w, name)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+
+	if len(in) == 0 && additionalLabelName == "" {
+		if metricInsideBraces {
+			err := w.WriteByte('}')
+			written++
+			if err != nil {
+				return written, err
+			}
+		}
+		return written, nil
+	}
+
+	for _, lp := range in {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := writeName(w, lp.GetName())
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeEscapedString(w, lp.GetValue(), true)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+		separator = ','
+	}
+	if additionalLabelName != "" {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := w.WriteString(additionalLabelName)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeOpenMetricsFloat(w, additionalLabelValue)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+	}
+	err := w.WriteByte('}')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeOpenMetricsCreated writes the created timestamp for a single time series
+// following OpenMetrics text format to w, given the metric name, the metric proto
+// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
+// an additional label name with a float64 value (use empty string as label name if
+// not required) and the timestamp that represents the created timestamp.
+// The function returns the number of bytes written and any error encountered.
+func writeOpenMetricsCreated(w enhancedWriter,
+	name, suffixToTrim string, metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	createdTimestamp *timestamppb.Timestamp,
+) (int, error) {
+	written := 0
+	n, err := writeOpenMetricsNameAndLabelPairs(
+		w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+
+	// TODO(beorn7): Format this directly from components of ts to
+	// avoid overflow/underflow and precision issues of the float
+	// conversion.
+	n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
+// function returns the number of bytes written and any error encountered.
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
+	written := 0
+	n, err := w.WriteString(" # ")
+	written += n
+	if err != nil {
+		return written, err
+	}
+	n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	n, err = writeOpenMetricsFloat(w, e.GetValue())
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if e.Timestamp != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		err = e.Timestamp.CheckValid()
+		if err != nil {
+			return written, err
+		}
+		ts := e.Timestamp.AsTime()
+		// TODO(beorn7): Format this directly from components of ts to
+		// avoid overflow/underflow and precision issues of the float
+		// conversion.
+		n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	return written, nil
+}
+
+// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
+// number would otherwise contain neither a "." nor an "e".
+func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
+	switch {
+	case f == 1:
+		return w.WriteString("1.0")
+	case f == 0:
+		return w.WriteString("0.0")
+	case f == -1:
+		return w.WriteString("-1.0")
+	case math.IsNaN(f):
+		return w.WriteString("NaN")
+	case math.IsInf(f, +1):
+		return w.WriteString("+Inf")
+	case math.IsInf(f, -1):
+		return w.WriteString("-Inf")
+	default:
+		bp := numBufPool.Get().(*[]byte)
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+		if !bytes.ContainsAny(*bp, "e.") {
+			*bp = append(*bp, '.', '0')
+		}
+		written, err := w.Write(*bp)
+		numBufPool.Put(bp)
+		return written, err
+	}
+}
+
+// writeUint is like writeInt just for uint64.
+func writeUint(w enhancedWriter, u uint64) (int, error) {
+	bp := numBufPool.Get().(*[]byte)
+	*bp = strconv.AppendUint((*bp)[:0], u, 10)
+	written, err := w.Write(*bp)
+	numBufPool.Put(bp)
+	return written, err
+}
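
An illustrative sketch of driving MetricFamilyToOpenMetrics and FinalizeOpenMetrics directly (the gauge family and its unit are made-up examples):

package main

import (
	"bytes"
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Illustrative gauge family with a declared unit.
	mf := &dto.MetricFamily{
		Name: proto.String("process_start_time"),
		Help: proto.String("Start time of the process."),
		Unit: proto.String("seconds"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{
			{Gauge: &dto.Gauge{Value: proto.Float64(1.7e9)}},
		},
	}
	var buf bytes.Buffer
	// WithUnit opts in to the "# UNIT" line and the "_seconds" name suffix.
	if _, err := expfmt.MetricFamilyToOpenMetrics(&buf, mf, expfmt.WithUnit()); err != nil {
		panic(err)
	}
	// The caller appends the final "# EOF" once all families are written.
	if _, err := expfmt.FinalizeOpenMetrics(&buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}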
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..c4e9c1b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,520 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/common/model"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
+// implements it.
+type enhancedWriter interface {
+	io.Writer
+	WriteRune(r rune) (n int, err error)
+	WriteString(s string) (n int, err error)
+	WriteByte(c byte) error
+}
+
+const (
+	initialNumBufSize = 24
+)
+
+var (
+	bufPool = sync.Pool{
+		New: func() interface{} {
+			return bufio.NewWriter(io.Discard)
+		},
+	}
+	numBufPool = sync.Pool{
+		New: func() interface{} {
+			b := make([]byte, 0, initialNumBufSize)
+			return &b
+		},
+	}
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+	// Fail-fast checks.
+	if len(in.Metric) == 0 {
+		return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
+	}
+	name := in.GetName()
+	if name == "" {
+		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Try the interface upgrade. If it doesn't work, we'll use a
+	// bufio.Writer from the sync.Pool.
+	w, ok := out.(enhancedWriter)
+	if !ok {
+		b := bufPool.Get().(*bufio.Writer)
+		b.Reset(out)
+		w = b
+		defer func() {
+			bErr := b.Flush()
+			if err == nil {
+				err = bErr
+			}
+			bufPool.Put(b)
+		}()
+	}
+
+	var n int
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err = w.WriteString("# HELP ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = writeName(w, name)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Help, false)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+	n, err = w.WriteString("# TYPE ")
+	written += n
+	if err != nil {
+		return
+	}
+	n, err = writeName(w, name)
+	written += n
+	if err != nil {
+		return
+	}
+	metricType := in.GetType()
+	switch metricType {
+	case dto.MetricType_COUNTER:
+		n, err = w.WriteString(" counter\n")
+	case dto.MetricType_GAUGE:
+		n, err = w.WriteString(" gauge\n")
+	case dto.MetricType_SUMMARY:
+		n, err = w.WriteString(" summary\n")
+	case dto.MetricType_UNTYPED:
+		n, err = w.WriteString(" untyped\n")
+	case dto.MetricType_HISTOGRAM:
+		n, err = w.WriteString(" histogram\n")
+	default:
+		return written, fmt.Errorf("unknown metric type %s", metricType.String())
+	}
+	written += n
+	if err != nil {
+		return
+	}
+
+	// Finally the samples, one line for each.
+	for _, metric := range in.Metric {
+		switch metricType {
+		case dto.MetricType_COUNTER:
+			if metric.Counter == nil {
+				return written, fmt.Errorf(
+					"expected counter in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Counter.GetValue(),
+			)
+		case dto.MetricType_GAUGE:
+			if metric.Gauge == nil {
+				return written, fmt.Errorf(
+					"expected gauge in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Gauge.GetValue(),
+			)
+		case dto.MetricType_UNTYPED:
+			if metric.Untyped == nil {
+				return written, fmt.Errorf(
+					"expected untyped in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Untyped.GetValue(),
+			)
+		case dto.MetricType_SUMMARY:
+			if metric.Summary == nil {
+				return written, fmt.Errorf(
+					"expected summary in metric %s %s", name, metric,
+				)
+			}
+			for _, q := range metric.Summary.Quantile {
+				n, err = writeSample(
+					w, name, "", metric,
+					model.QuantileLabel, q.GetQuantile(),
+					q.GetValue(),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeSample(
+				w, name, "_sum", metric, "", 0,
+				metric.Summary.GetSampleSum(),
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeSample(
+				w, name, "_count", metric, "", 0,
+				float64(metric.Summary.GetSampleCount()),
+			)
+		case dto.MetricType_HISTOGRAM:
+			if metric.Histogram == nil {
+				return written, fmt.Errorf(
+					"expected histogram in metric %s %s", name, metric,
+				)
+			}
+			infSeen := false
+			for _, b := range metric.Histogram.Bucket {
+				n, err = writeSample(
+					w, name, "_bucket", metric,
+					model.BucketLabel, b.GetUpperBound(),
+					float64(b.GetCumulativeCount()),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+				if math.IsInf(b.GetUpperBound(), +1) {
+					infSeen = true
+				}
+			}
+			if !infSeen {
+				n, err = writeSample(
+					w, name, "_bucket", metric,
+					model.BucketLabel, math.Inf(+1),
+					float64(metric.Histogram.GetSampleCount()),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeSample(
+				w, name, "_sum", metric, "", 0,
+				metric.Histogram.GetSampleSum(),
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeSample(
+				w, name, "_count", metric, "", 0,
+				float64(metric.Histogram.GetSampleCount()),
+			)
+		default:
+			return written, fmt.Errorf(
+				"unexpected type in metric %s %s", name, metric,
+			)
+		}
+		written += n
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// writeSample writes a single sample in text format to w, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// with a float64 value (use empty string as label name if not required), and
+// the value. The function returns the number of bytes written and any error
+// encountered.
+func writeSample(
+	w enhancedWriter,
+	name, suffix string,
+	metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	value float64,
+) (int, error) {
+	written := 0
+	n, err := writeNameAndLabelPairs(
+		w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	n, err = writeFloat(w, value)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if metric.TimestampMs != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err = writeInt(w, *metric.TimestampMs)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given metric name and additional label pair into text formatted as
+// required by the text format and writes it to 'w'. An empty slice in
+// combination with an empty string 'additionalLabelName' results in nothing
+// being written. Otherwise, the label pairs are written, escaped as required by
+// the text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered. If the metric name is not
+// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
+// label names will also be quoted.
+func writeNameAndLabelPairs(
+	w enhancedWriter,
+	name string,
+	in []*dto.LabelPair,
+	additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+	var (
+		written            int
+		separator          byte = '{'
+		metricInsideBraces      = false
+	)
+
+	if name != "" {
+		// If the name does not pass the legacy validity check, we must put the
+		// metric name inside the braces.
+		if !model.LegacyValidation.IsValidMetricName(name) {
+			metricInsideBraces = true
+			err := w.WriteByte(separator)
+			written++
+			if err != nil {
+				return written, err
+			}
+			separator = ','
+		}
+		n, err := writeName(w, name)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+
+	if len(in) == 0 && additionalLabelName == "" {
+		if metricInsideBraces {
+			err := w.WriteByte('}')
+			written++
+			if err != nil {
+				return written, err
+			}
+		}
+		return written, nil
+	}
+
+	for _, lp := range in {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := writeName(w, lp.GetName())
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeEscapedString(w, lp.GetValue(), true)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+		separator = ','
+	}
+	if additionalLabelName != "" {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := w.WriteString(additionalLabelName)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeFloat(w, additionalLabelValue)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+	}
+	err := w.WriteByte('}')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+var (
+	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
+	if includeDoubleQuote {
+		return quotedEscaper.WriteString(w, v)
+	}
+	return escaper.WriteString(w, v)
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+	switch {
+	case f == 1:
+		return 1, w.WriteByte('1')
+	case f == 0:
+		return 1, w.WriteByte('0')
+	case f == -1:
+		return w.WriteString("-1")
+	case math.IsNaN(f):
+		return w.WriteString("NaN")
+	case math.IsInf(f, +1):
+		return w.WriteString("+Inf")
+	case math.IsInf(f, -1):
+		return w.WriteString("-Inf")
+	default:
+		bp := numBufPool.Get().(*[]byte)
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+		written, err := w.Write(*bp)
+		numBufPool.Put(bp)
+		return written, err
+	}
+}
+
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+	bp := numBufPool.Get().(*[]byte)
+	*bp = strconv.AppendInt((*bp)[:0], i, 10)
+	written, err := w.Write(*bp)
+	numBufPool.Put(bp)
+	return written, err
+}
+
+// writeName writes a string as-is if it complies with the legacy naming
+// scheme, or escapes it in double quotes if not.
+func writeName(w enhancedWriter, name string) (int, error) {
+	if model.LegacyValidation.IsValidMetricName(name) {
+		return w.WriteString(name)
+	}
+	var written int
+	var err error
+	err = w.WriteByte('"')
+	written++
+	if err != nil {
+		return written, err
+	}
+	var n int
+	n, err = writeEscapedString(w, name, true)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte('"')
+	written++
+	return written, err
+}
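
An illustrative sketch of the plain-text writer above (the metric family is a made-up example):

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Illustrative untyped metric with one label.
	mf := &dto.MetricFamily{
		Name: proto.String("queue_length"),
		Type: dto.MetricType_UNTYPED.Enum(),
		Metric: []*dto.Metric{
			{
				Label:   []*dto.LabelPair{{Name: proto.String("queue"), Value: proto.String("ingest")}},
				Untyped: &dto.Untyped{Value: proto.Float64(7)},
			},
		},
	}
	// Writes the "# TYPE" comment (and "# HELP" when Help is set) plus one
	// sample line per metric.
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}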
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000..8f2edde
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,933 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+	Line int
+	Msg  string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+	return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+	metricFamiliesByName map[string]*dto.MetricFamily
+	buf                  *bufio.Reader // Where the parsed input is read through.
+	err                  error         // Most recent error.
+	lineCount            int           // Tracks the line count for error messages.
+	currentByte          byte          // The most recent byte read.
+	currentToken         bytes.Buffer  // Re-used each time a token has to be gathered from multiple bytes.
+	currentMF            *dto.MetricFamily
+	currentMetric        *dto.Metric
+	currentLabelPair     *dto.LabelPair
+	currentLabelPairs    []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
+
+	// The remaining member variables are only used for summaries/histograms.
+	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+	// Summary specific.
+	summaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+	currentQuantile float64
+	// Histogram specific.
+	histograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+	currentBucket float64
+	// These tell us if the currently processed line ends on '_count' or
+	// '_sum' respectively and belongs to a summary/histogram, representing the sample
+	// count and sum of that summary/histogram.
+	currentIsSummaryCount, currentIsSummarySum     bool
+	currentIsHistogramCount, currentIsHistogramSum bool
+	// These indicate if the metric name from the current line being parsed is inside
+	// braces and if that metric name was found respectively.
+	currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
+	// scheme sets the desired ValidationScheme for names. Defaults to the invalid
+	// UnsetValidation.
+	scheme model.ValidationScheme
+}
+
+// NewTextParser returns a new TextParser with the provided nameValidationScheme.
+func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser {
+	return TextParser{scheme: nameValidationScheme}
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+	p.reset(in)
+	for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+		// Magic happens here...
+	}
+	// Get rid of empty metric families.
+	for k, mf := range p.metricFamiliesByName {
+		if len(mf.GetMetric()) == 0 {
+			delete(p.metricFamiliesByName, k)
+		}
+	}
+	// If p.err is io.EOF now, we have run into a premature end of the input
+	// stream. Turn this error into something nicer and more
+	// meaningful. (io.EOF is often used as a signal for the legitimate end
+	// of an input stream.)
+	if p.err != nil && errors.Is(p.err, io.EOF) {
+		p.parseError("unexpected end of input stream")
+	}
+	return p.metricFamiliesByName, p.err
+}
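
An illustrative round trip through the parser entry point above, using the UTF-8 validation scheme as in the fuzz target earlier (the exposition snippet is made up):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	input := "# TYPE queue_length gauge\nqueue_length{queue=\"ingest\"} 7\n"
	parser := expfmt.NewTextParser(model.UTF8Validation)
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	// One MetricFamily per metric name, keyed by name.
	for name, mf := range families {
		fmt.Println(name, mf.GetType(), len(mf.GetMetric()))
	}
}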
+
+func (p *TextParser) reset(in io.Reader) {
+	p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+	p.currentLabelPairs = nil
+	if p.buf == nil {
+		p.buf = bufio.NewReader(in)
+	} else {
+		p.buf.Reset(in)
+	}
+	p.err = nil
+	p.lineCount = 0
+	if p.summaries == nil || len(p.summaries) > 0 {
+		p.summaries = map[uint64]*dto.Metric{}
+	}
+	if p.histograms == nil || len(p.histograms) > 0 {
+		p.histograms = map[uint64]*dto.Metric{}
+	}
+	p.currentQuantile = math.NaN()
+	p.currentBucket = math.NaN()
+	p.currentMF = nil
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+	p.lineCount++
+	p.currentMetricIsInsideBraces = false
+	p.currentMetricInsideBracesIsPresent = false
+	if p.skipBlankTab(); p.err != nil {
+		// This is the only place that we expect to see io.EOF,
+		// which is not an error but the signal that we are done.
+		// Any other error that happens to align with the start of
+		// a line is still an error.
+		if errors.Is(p.err, io.EOF) {
+			p.err = nil
+		}
+		return nil
+	}
+	switch p.currentByte {
+	case '#':
+		return p.startComment
+	case '\n':
+		return p.startOfLine // Empty line, start the next one.
+	case '{':
+		p.currentMetricIsInsideBraces = true
+		return p.readingLabels
+	}
+	return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	// If we have hit the end of line already, there is nothing left
+	// to do. This is not considered a syntax error.
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	keyword := p.currentToken.String()
+	if keyword != "HELP" && keyword != "TYPE" {
+		// Generic comment, ignore by fast forwarding to end of line.
+		for p.currentByte != '\n' {
+			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+				return nil // Unexpected end of input.
+			}
+		}
+		return p.startOfLine
+	}
+	// There is something. Next has to be a metric name.
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenAsMetricName(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		// At the end of the line already.
+		// Again, this is not considered a syntax error.
+		return p.startOfLine
+	}
+	if !isBlankOrTab(p.currentByte) {
+		p.parseError("invalid metric name in comment")
+		return nil
+	}
+	p.setOrCreateCurrentMF()
+	if p.err != nil {
+		return nil
+	}
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		// At the end of the line already.
+		// Again, this is not considered a syntax error.
+		return p.startOfLine
+	}
+	switch keyword {
+	case "HELP":
+		return p.readingHelp
+	case "TYPE":
+		return p.readingType
+	}
+	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+	if p.readTokenAsMetricName(); p.err != nil {
+		return nil
+	}
+	if p.currentToken.Len() == 0 {
+		p.parseError("invalid metric name")
+		return nil
+	}
+	p.setOrCreateCurrentMF()
+	if p.err != nil {
+		return nil
+	}
+	// Now is the time to fix the type if it hasn't happened yet.
+	if p.currentMF.Type == nil {
+		p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+	}
+	p.currentMetric = &dto.Metric{}
+	// Do not append the newly created currentMetric to
+	// currentMF.Metric right now. First check whether this is a summary
+	// and whether the metric already exists, which we can only know after
+	// having read all the labels.
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+	// Summaries/histograms are special. We have to reset the
+	// currentLabels map, currentQuantile and currentBucket before starting to
+	// read labels.
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+		p.currentLabels = map[string]string{}
+		p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+		p.currentQuantile = math.NaN()
+		p.currentBucket = math.NaN()
+	}
+	if p.currentByte != '{' {
+		return p.readingValue
+	}
+	return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '}' {
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
+		if p.skipBlankTab(); p.err != nil {
+			return nil // Unexpected end of input.
+		}
+		return p.readingValue
+	}
+	if p.readTokenAsLabelName(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() == 0 {
+		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+		return nil
+	}
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte != '=' {
+		if p.currentMetricIsInsideBraces {
+			if p.currentMetricInsideBracesIsPresent {
+				p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
+				return nil
+			}
+			switch p.currentByte {
+			case ',':
+				p.setOrCreateCurrentMF()
+				if p.err != nil {
+					return nil
+				}
+				if p.currentMF.Type == nil {
+					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+				}
+				p.currentMetric = &dto.Metric{}
+				p.currentMetricInsideBracesIsPresent = true
+				return p.startLabelName
+			case '}':
+				p.setOrCreateCurrentMF()
+				if p.err != nil {
+					p.currentLabelPairs = nil
+					return nil
+				}
+				if p.currentMF.Type == nil {
+					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+				}
+				p.currentMetric = &dto.Metric{}
+				p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+				p.currentLabelPairs = nil
+				if p.skipBlankTab(); p.err != nil {
+					return nil // Unexpected end of input.
+				}
+				return p.readingValue
+			default:
+				p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
+				return nil
+			}
+		}
+		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+		p.currentLabelPairs = nil
+		return nil
+	}
+	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+		p.currentLabelPairs = nil
+		return nil
+	}
+	if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) {
+		p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName()))
+		p.currentLabelPairs = nil
+		return nil
+	}
+	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
+	// labels to 'real' labels.
+	if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
+		(p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
+		p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
+	}
+	// Check for duplicate label names.
+	labels := make(map[string]struct{})
+	for _, l := range p.currentLabelPairs {
+		lName := l.GetName()
+		if _, exists := labels[lName]; exists {
+			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+			p.currentLabelPairs = nil
+			return nil
+		}
+		labels[lName] = struct{}{}
+	}
+	return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte != '"' {
+		p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+		return nil
+	}
+	if p.readTokenAsLabelValue(); p.err != nil {
+		return nil
+	}
+	if !model.LabelValue(p.currentToken.String()).IsValid() {
+		p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentLabelPair.Value = proto.String(p.currentToken.String())
+	// Special treatment of summaries:
+	// - Quantile labels are special, will result in dto.Quantile later.
+	// - Other labels have to be added to currentLabels for signature calculation.
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+		if p.currentLabelPair.GetName() == model.QuantileLabel {
+			if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
+				// Create a more helpful error message.
+				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+				p.currentLabelPairs = nil
+				return nil
+			}
+		} else {
+			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+		}
+	}
+	// Similar special treatment of histograms.
+	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+		if p.currentLabelPair.GetName() == model.BucketLabel {
+			if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
+				// Create a more helpful error message.
+				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+				return nil
+			}
+		} else {
+			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+		}
+	}
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	switch p.currentByte {
+	case ',':
+		return p.startLabelName
+
+	case '}':
+		if p.currentMF == nil {
+			p.parseError("invalid metric name")
+			return nil
+		}
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
+		if p.skipBlankTab(); p.err != nil {
+			return nil // Unexpected end of input.
+		}
+		return p.readingValue
+	default:
+		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+		p.currentLabelPairs = nil
+		return nil
+	}
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+	// When we are here, we have read all the labels, so for the
+	// special case of a summary/histogram, we can finally find out
+	// if the metric already exists.
+	switch p.currentMF.GetType() {
+	case dto.MetricType_SUMMARY:
+		signature := model.LabelsToSignature(p.currentLabels)
+		if summary := p.summaries[signature]; summary != nil {
+			p.currentMetric = summary
+		} else {
+			p.summaries[signature] = p.currentMetric
+			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+		}
+	case dto.MetricType_HISTOGRAM:
+		signature := model.LabelsToSignature(p.currentLabels)
+		if histogram := p.histograms[signature]; histogram != nil {
+			p.currentMetric = histogram
+		} else {
+			p.histograms[signature] = p.currentMetric
+			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+		}
+	default:
+		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	value, err := parseFloat(p.currentToken.String())
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+		return nil
+	}
+	switch p.currentMF.GetType() {
+	case dto.MetricType_COUNTER:
+		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+	case dto.MetricType_GAUGE:
+		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+	case dto.MetricType_UNTYPED:
+		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+	case dto.MetricType_SUMMARY:
+		// *sigh*
+		if p.currentMetric.Summary == nil {
+			p.currentMetric.Summary = &dto.Summary{}
+		}
+		switch {
+		case p.currentIsSummaryCount:
+			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsSummarySum:
+			p.currentMetric.Summary.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentQuantile):
+			p.currentMetric.Summary.Quantile = append(
+				p.currentMetric.Summary.Quantile,
+				&dto.Quantile{
+					Quantile: proto.Float64(p.currentQuantile),
+					Value:    proto.Float64(value),
+				},
+			)
+		}
+	case dto.MetricType_HISTOGRAM:
+		// *sigh*
+		if p.currentMetric.Histogram == nil {
+			p.currentMetric.Histogram = &dto.Histogram{}
+		}
+		switch {
+		case p.currentIsHistogramCount:
+			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsHistogramSum:
+			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentBucket):
+			p.currentMetric.Histogram.Bucket = append(
+				p.currentMetric.Histogram.Bucket,
+				&dto.Bucket{
+					UpperBound:      proto.Float64(p.currentBucket),
+					CumulativeCount: proto.Uint64(uint64(value)),
+				},
+			)
+		}
+	default:
+		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+	}
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMetric.TimestampMs = proto.Int64(timestamp)
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() > 0 {
+		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+		return nil
+	}
+	return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+	if p.currentMF.Help != nil {
+		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the docstring.
+	if p.readTokenUntilNewline(true); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	p.currentMF.Help = proto.String(p.currentToken.String())
+	return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg:  msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken.  The
+// first byte considered is the byte already read (now in p.currentByte).  The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken.  The first
+// byte considered is the byte already read (now in p.currentByte).  The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+	p.currentToken.Reset()
+	escaped := false
+	for p.err == nil {
+		if recognizeEscapeSequence && escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '\n':
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+	p.currentToken.Reset()
+	// A UTF-8 metric name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
+	if !isValidMetricNameStart(p.currentByte) {
+		return
+	}
+	for p.err == nil {
+		if escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '"':
+				quoted = !quoted
+				if !quoted {
+					p.currentByte, p.err = p.buf.ReadByte()
+					return
+				}
+			case '\n':
+				p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+		if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
+			return
+		}
+	}
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+	p.currentToken.Reset()
+	// A UTF-8 label name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
+	if !isValidLabelNameStart(p.currentByte) {
+		return
+	}
+	for p.err == nil {
+		if escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '"':
+				quoted = !quoted
+				if !quoted {
+					p.currentByte, p.err = p.buf.ReadByte()
+					return
+				}
+			case '\n':
+				p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+		if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
+			return
+		}
+	}
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+	p.currentToken.Reset()
+	escaped := false
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+			return
+		}
+		if escaped {
+			switch p.currentByte {
+			case '"', '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				p.currentLabelPairs = nil
+				return
+			}
+			escaped = false
+			continue
+		}
+		switch p.currentByte {
+		case '"':
+			return
+		case '\n':
+			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+			return
+		case '\\':
+			escaped = true
+		default:
+			p.currentToken.WriteByte(p.currentByte)
+		}
+	}
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+	p.currentIsSummaryCount = false
+	p.currentIsSummarySum = false
+	p.currentIsHistogramCount = false
+	p.currentIsHistogramSum = false
+	name := p.currentToken.String()
+	if !p.scheme.IsValidMetricName(name) {
+		p.parseError(fmt.Sprintf("invalid metric name %q", name))
+		return
+	}
+	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+		return
+	}
+	// Try out if this is a _sum or _count for a summary/histogram.
+	summaryName := summaryMetricName(name)
+	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+		if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+			if isCount(name) {
+				p.currentIsSummaryCount = true
+			}
+			if isSum(name) {
+				p.currentIsSummarySum = true
+			}
+			return
+		}
+	}
+	histogramName := histogramMetricName(name)
+	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+			if isCount(name) {
+				p.currentIsHistogramCount = true
+			}
+			if isSum(name) {
+				p.currentIsHistogramSum = true
+			}
+			return
+		}
+	}
+	p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+	p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
+}
+
+func isValidLabelNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
+}
+
+func isValidMetricNameStart(b byte) bool {
+	return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameContinuation(b, quoted) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+	return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+	return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+	return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+	return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+	switch {
+	case isCount(name):
+		return name[:len(name)-6]
+	case isSum(name):
+		return name[:len(name)-4]
+	default:
+		return name
+	}
+}
+
+func histogramMetricName(name string) string {
+	switch {
+	case isCount(name):
+		return name[:len(name)-6]
+	case isSum(name):
+		return name[:len(name)-4]
+	case isBucket(name):
+		return name[:len(name)-7]
+	default:
+		return name
+	}
+}
+
+func parseFloat(s string) (float64, error) {
+	if strings.ContainsAny(s, "pP_") {
+		return 0, errors.New("unsupported character in float")
+	}
+	return strconv.ParseFloat(s, 64)
+}
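
The state functions above are internal to the parser; the exported entry point that drives them is TextToMetricFamilies, defined earlier in this file. A minimal usage sketch (assuming the standard expfmt import path for this vendored package):

	package main

	import (
		"fmt"
		"strings"

		"github.com/prometheus/common/expfmt"
	)

	func main() {
		var parser expfmt.TextParser
		input := strings.NewReader(
			"# TYPE http_requests_total counter\n" +
				"http_requests_total{code=\"200\"} 1027 1395066363000\n")
		// TextToMetricFamilies runs the state machine until EOF and returns
		// one dto.MetricFamily per metric name.
		families, err := parser.TextToMetricFamilies(input)
		if err != nil {
			panic(err)
		}
		for name, mf := range families {
			fmt.Println(name, mf.GetType(), len(mf.GetMetric()))
		}
	}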
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000..460f554
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,162 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+type AlertStatus string
+
+const (
+	AlertFiring   AlertStatus = "firing"
+	AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label value pairs for the purpose of aggregation, matching, and disposition
+	// dispatching. This must minimally include an "alertname" label.
+	Labels LabelSet `json:"labels"`
+
+	// Extra key/value information which does not define alert identity.
+	Annotations LabelSet `json:"annotations"`
+
+	// The known time range for this alert. Both ends are optional.
+	StartsAt     time.Time `json:"startsAt,omitempty"`
+	EndsAt       time.Time `json:"endsAt,omitempty"`
+	GeneratorURL string    `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	return a.StatusAt(time.Now())
+}
+
+// StatusAt returns the status of the alert at the given timestamp.
+func (a *Alert) StatusAt(ts time.Time) AlertStatus {
+	if a.ResolvedAt(ts) {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Validate checks whether the alert data is inconsistent.
+func (a *Alert) Validate() error {
+	if a.StartsAt.IsZero() {
+		return errors.New("start time missing")
+	}
+	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+		return errors.New("start time must be before end time")
+	}
+	if err := a.Labels.Validate(); err != nil {
+		return fmt.Errorf("invalid label set: %w", err)
+	}
+	if len(a.Labels) == 0 {
+		return errors.New("at least one label pair required")
+	}
+	if err := a.Annotations.Validate(); err != nil {
+		return fmt.Errorf("invalid annotations: %w", err)
+	}
+	return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
+
+// HasFiringAt returns true iff one of the alerts is not resolved
+// at the time ts.
+func (as Alerts) HasFiringAt(ts time.Time) bool {
+	for _, a := range as {
+		if !a.ResolvedAt(ts) {
+			return true
+		}
+	}
+	return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+	if as.HasFiring() {
+		return AlertFiring
+	}
+	return AlertResolved
+}
+
+// StatusAt returns StatusFiring iff at least one of the alerts is firing
+// at the time ts.
+func (as Alerts) StatusAt(ts time.Time) AlertStatus {
+	if as.HasFiringAt(ts) {
+		return AlertFiring
+	}
+	return AlertResolved
+}
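
As an illustration of the Alert API above, a small sketch; the label and annotation values are made up for the example:

	package main

	import (
		"fmt"
		"time"

		"github.com/prometheus/common/model"
	)

	func main() {
		a := &model.Alert{
			// Name() reads the "alertname" (AlertNameLabel) label.
			Labels:      model.LabelSet{model.AlertNameLabel: "HighErrorRate", "job": "api"},
			Annotations: model.LabelSet{"summary": "error rate above 5% for 10m"},
			StartsAt:    time.Now().Add(-10 * time.Minute),
		}
		if err := a.Validate(); err != nil {
			panic(err)
		}
		// With a zero EndsAt the alert is still firing.
		fmt.Println(a.Name(), a.Status(), a.Resolved()) // HighErrorRate firing false
	}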
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+	num, err := strconv.ParseUint(s, 16, 64)
+	return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+	num, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return 0, err
+	}
+	return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+	return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+	return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+	return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+	f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for k := range s {
+		if _, ok := o[k]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+	myLength, otherLength := len(s), len(o)
+	if myLength == 0 || otherLength == 0 {
+		return FingerprintSet{}
+	}
+
+	subSet := s
+	superSet := o
+
+	if otherLength < myLength {
+		subSet = o
+		superSet = s
+	}
+
+	out := FingerprintSet{}
+
+	for k := range subSet {
+		if _, ok := superSet[k]; ok {
+			out[k] = struct{}{}
+		}
+	}
+
+	return out
+}
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 0000000..367afec
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
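
These helpers are an inlined, allocation-free copy of FNV-1a; the constants match the standard library's hash/fnv, as this hedged comparison illustrates:

	package main

	import (
		"fmt"
		"hash/fnv"
	)

	func main() {
		h := fnv.New64a()
		h.Write([]byte("up"))
		// The vendored hashNew/hashAdd pair produces the same 64-bit value
		// for the same input, without allocating a hash.Hash64.
		fmt.Printf("%016x\n", h.Sum64())
	}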
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..dfeb34b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,229 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing the alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+	// MetricTypeLabel is the label name indicating the metric type of
+	// timeseries as per the PROM-39 proposal.
+	MetricTypeLabel = "__type__"
+	// MetricUnitLabel is the label name indicating the metric unit of
+	// timeseries as per the PROM-39 proposal.
+	MetricUnitLabel = "__unit__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel = "__metrics_path__"
+
+	// ScrapeIntervalLabel is the name of the label that holds the scrape interval
+	// used to scrape a target.
+	ScrapeIntervalLabel = "__scrape_interval__"
+
+	// ScrapeTimeoutLabel is the name of the label that holds the scrape
+	// timeout used to scrape a target.
+	ScrapeTimeoutLabel = "__scrape_timeout__"
+
+	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
+	// label names.
+	ReservedLabelPrefix = "__"
+
+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
+	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series. This is reserved for use in
+	// Prometheus configuration files by users.
+	TmpLabelPrefix = "__tmp_"
+
+	// ParamLabelPrefix is a prefix for labels that provide URL parameters
+	// used to scrape a target.
+	ParamLabelPrefix = "__param_"
+
+	// JobLabel is the label name indicating the job from which a timeseries
+	// was scraped.
+	JobLabel = "job"
+
+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel = "instance"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric.  It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
+// NameValidationScheme is set to UTF8Validation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [ValidationScheme.IsValidLabelName] instead.
+func (ln LabelName) IsValid() bool {
+	return NameValidationScheme.IsValidLabelName(string(ln))
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
+// legacy names. It does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [LegacyValidation.IsValidLabelName] instead.
+func (ln LabelName) IsValidLegacy() bool {
+	return LegacyValidation.IsValidLabelName(string(ln))
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+	labelStrings := make([]string, 0, len(l))
+	for _, label := range l {
+		labelStrings = append(labelStrings, string(label))
+	}
+	return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+	return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+	return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+	return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+	Name  LabelName
+	Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+	return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+	switch {
+	case l[i].Name > l[j].Name:
+		return false
+	case l[i].Name < l[j].Name:
+		return true
+	case l[i].Value > l[j].Value:
+		return false
+	case l[i].Value < l[j].Value:
+		return true
+	default:
+		return false
+	}
+}
+
+func (l LabelPairs) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
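
A short usage sketch for the label types defined above (note that LabelName.IsValid and IsValidLegacy are deprecated in favour of the ValidationScheme methods):

	package main

	import (
		"fmt"
		"sort"

		"github.com/prometheus/common/model"
	)

	func main() {
		names := model.LabelNames{"zone", "instance", "job"}
		sort.Sort(names)
		fmt.Println(names) // instance, job, zone

		// Legacy validation rejects names that start with a digit;
		// UTF-8 validation only requires a non-empty, valid UTF-8 string.
		fmt.Println(model.LabelName("0bad").IsValidLegacy()) // false
		fmt.Println(model.LabelName("0bad").IsValid())       // true under the default UTF8Validation
	}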
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000..9de47b2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,158 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs.  The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not.  All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+	for ln, lv := range ls {
+		if !ln.IsValid() {
+			return fmt.Errorf("invalid name %q", ln)
+		}
+		if !lv.IsValid() {
+			return fmt.Errorf("invalid value %q", lv)
+		}
+	}
+	return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+	if len(ls) != len(o) {
+		return false
+	}
+	for ln, lv := range ls {
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if olv != lv {
+			return false
+		}
+	}
+	return true
+}
+
+// Before compares the metrics, using the following criteria:
+//
+// If m has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in m, then m is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If m and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+	if len(ls) < len(o) {
+		return true
+	}
+	if len(ls) > len(o) {
+		return false
+	}
+
+	lns := make(LabelNames, 0, len(ls)+len(o))
+	for ln := range ls {
+		lns = append(lns, ln)
+	}
+	for ln := range o {
+		lns = append(lns, ln)
+	}
+	// It's probably not worth it to de-dup lns.
+	sort.Sort(lns)
+	for _, ln := range lns {
+		mlv, ok := ls[ln]
+		if !ok {
+			return true
+		}
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if mlv < olv {
+			return true
+		}
+		if mlv > olv {
+			return false
+		}
+	}
+	return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+	lsn := make(LabelSet, len(ls))
+	for ln, lv := range ls {
+		lsn[ln] = lv
+	}
+	return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (ls LabelSet) Merge(other LabelSet) LabelSet {
+	result := make(LabelSet, len(ls))
+
+	for k, v := range ls {
+		result[k] = v
+	}
+
+	for k, v := range other {
+		result[k] = v
+	}
+
+	return result
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+	return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+	return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ls *LabelSet) UnmarshalJSON(b []byte) error {
+	var m map[LabelName]LabelValue
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+	// encoding/json only unmarshals maps of the form map[string]T. It treats
+	// LabelName as a string and does not call its UnmarshalJSON method.
+	// Thus, we have to replicate the behavior here.
+	for ln := range m {
+		if !ln.IsValid() {
+			return fmt.Errorf("%q is not a valid label name", ln)
+		}
+	}
+	*ls = LabelSet(m)
+	return nil
+}
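
A sketch of the LabelSet helpers in combination; the printed form relies on the String method defined in labelset_string.go below:

	package main

	import (
		"fmt"

		"github.com/prometheus/common/model"
	)

	func main() {
		base := model.LabelSet{"job": "api", "instance": "a:9090"}
		extra := model.LabelSet{"env": "prod"}

		merged := base.Merge(extra) // non-destructive: base is unchanged
		fmt.Println(merged)         // {env="prod", instance="a:9090", job="api"}

		// Fingerprint is stable for equal label sets, so it can serve as a map key.
		fmt.Println(merged.Fingerprint() == base.Merge(extra).Fingerprint()) // true
	}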
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
new file mode 100644
index 0000000..abb2c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -0,0 +1,43 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"bytes"
+	"slices"
+	"strconv"
+)
+
+// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
+func (l LabelSet) String() string {
+	var lna [32]string // On stack to avoid memory allocation for sorting names.
+	labelNames := lna[:0]
+	for name := range l {
+		labelNames = append(labelNames, string(name))
+	}
+	slices.Sort(labelNames)
+	var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+	b := bytes.NewBuffer(bytea[:0])
+	b.WriteByte('{')
+	for i, name := range labelNames {
+		if i > 0 {
+			b.WriteString(", ")
+		}
+		b.WriteString(name)
+		b.WriteByte('=')
+		b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
+	}
+	b.WriteByte('}')
+	return b.String()
+}
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 0000000..447ab8a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+	MetricTypeCounter        = MetricType("counter")
+	MetricTypeGauge          = MetricType("gauge")
+	MetricTypeHistogram      = MetricType("histogram")
+	MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+	MetricTypeSummary        = MetricType("summary")
+	MetricTypeInfo           = MetricType("info")
+	MetricTypeStateset       = MetricType("stateset")
+	MetricTypeUnknown        = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000..3feebf3
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,593 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	dto "github.com/prometheus/client_model/go"
+	"go.yaml.in/yaml/v2"
+	"google.golang.org/protobuf/proto"
+)
+
+var (
+	// NameValidationScheme determines the global default method of the name
+	// validation to be used by all calls to IsValidMetricName() and LabelName
+	// IsValid().
+	//
+	// Deprecated: This variable should not be used and might be removed in the
+	// far future. If you wish to stick to the legacy name validation use
+	// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+	// instead. This variable is here as an escape hatch for emergency cases,
+	// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+	// to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+	// the change. In such a case, a temporary assignment to `LegacyValidation`
+	// value in the `init()` function in your main.go or so, could be considered.
+	//
+	// Historically we opted for a global variable for feature gating different
+	// validation schemes in operations that were not otherwise easily adjustable
+	// (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
+	// Labels structure or package might have been a better choice. Given the
+	// change was made and many have already upgraded prometheus/common, we leave this as-is
+	// with this warning and learning for the future.
+	NameValidationScheme = UTF8Validation
+
+	// NameEscapingScheme defines the default way that names will be escaped when
+	// presented to systems that do not support UTF-8 names. If the Content-Type
+	// "escaping" term is specified, that will override this value.
+	// NameEscapingScheme should not be set to the NoEscaping value. That string
+	// is used in content negotiation to indicate that a system supports UTF-8 and
+	// has that feature enabled.
+	NameEscapingScheme = UnderscoreEscaping
+)
+
+// ValidationScheme is a Go enum for determining how metric and label names will
+// be validated by this library.
+type ValidationScheme int
+
+const (
+	// UnsetValidation represents an undefined ValidationScheme.
+	// Should not be used in practice.
+	UnsetValidation ValidationScheme = iota
+
+	// LegacyValidation is a setting that requires that all metric and label names
+	// conform to the original Prometheus character requirements described by
+	// MetricNameRE and LabelNameRE.
+	LegacyValidation
+
+	// UTF8Validation only requires that metric and label names be valid UTF-8
+	// strings.
+	UTF8Validation
+)
+
+var _ interface {
+	yaml.Marshaler
+	yaml.Unmarshaler
+	json.Marshaler
+	json.Unmarshaler
+	fmt.Stringer
+} = new(ValidationScheme)
+
+// String returns the string representation of s.
+func (s ValidationScheme) String() string {
+	switch s {
+	case UnsetValidation:
+		return "unset"
+	case LegacyValidation:
+		return "legacy"
+	case UTF8Validation:
+		return "utf8"
+	default:
+		panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+	}
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (s ValidationScheme) MarshalYAML() (any, error) {
+	switch s {
+	case UnsetValidation:
+		return "", nil
+	case LegacyValidation, UTF8Validation:
+		return s.String(), nil
+	default:
+		panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+	}
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error {
+	var scheme string
+	if err := unmarshal(&scheme); err != nil {
+		return err
+	}
+	return s.Set(scheme)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s ValidationScheme) MarshalJSON() ([]byte, error) {
+	switch s {
+	case UnsetValidation:
+		return json.Marshal("")
+	case UTF8Validation, LegacyValidation:
+		return json.Marshal(s.String())
+	default:
+		return nil, fmt.Errorf("unhandled ValidationScheme: %d", s)
+	}
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error {
+	var repr string
+	if err := json.Unmarshal(bytes, &repr); err != nil {
+		return err
+	}
+	return s.Set(repr)
+}
+
+// Set implements the pflag.Value interface.
+func (s *ValidationScheme) Set(text string) error {
+	switch text {
+	case "":
+		// Don't change the value.
+	case LegacyValidation.String():
+		*s = LegacyValidation
+	case UTF8Validation.String():
+		*s = UTF8Validation
+	default:
+		return fmt.Errorf("unrecognized ValidationScheme: %q", text)
+	}
+	return nil
+}
+
+// IsValidMetricName returns whether metricName is valid according to s.
+func (s ValidationScheme) IsValidMetricName(metricName string) bool {
+	switch s {
+	case LegacyValidation:
+		if len(metricName) == 0 {
+			return false
+		}
+		for i, b := range metricName {
+			if !isValidLegacyRune(b, i) {
+				return false
+			}
+		}
+		return true
+	case UTF8Validation:
+		if len(metricName) == 0 {
+			return false
+		}
+		return utf8.ValidString(metricName)
+	default:
+		panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String()))
+	}
+}
+
+// IsValidLabelName returns whether labelName is valid according to s.
+func (s ValidationScheme) IsValidLabelName(labelName string) bool {
+	switch s {
+	case LegacyValidation:
+		if len(labelName) == 0 {
+			return false
+		}
+		for i, b := range labelName {
+			// TODO: Apply De Morgan's law. Make sure there are tests for this.
+			if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
+				return false
+			}
+		}
+		return true
+	case UTF8Validation:
+		if len(labelName) == 0 {
+			return false
+		}
+		return utf8.ValidString(labelName)
+	default:
+		panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s))
+	}
+}
+
+// Type implements the pflag.Value interface.
+func (ValidationScheme) Type() string {
+	return "validationScheme"
+}
+
+type EscapingScheme int
+
+const (
+	// NoEscaping indicates that a name will not be escaped. Unescaped names that
+	// do not conform to the legacy validity check will use a new exposition
+	// format syntax that will be officially standardized in future versions.
+	NoEscaping EscapingScheme = iota
+
+	// UnderscoreEscaping replaces all legacy-invalid characters with underscores.
+	UnderscoreEscaping
+
+	// DotsEscaping is similar to UnderscoreEscaping, except that dots are
+	// converted to `_dot_` and pre-existing underscores are converted to `__`.
+	DotsEscaping
+
+	// ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
+	// characters with the unicode value, surrounded by underscores. Single
+	// underscores are replaced with double underscores.
+	ValueEncodingEscaping
+)
+
+const (
+	// EscapingKey is the key in an Accept or Content-Type header that defines how
+	// metric and label names that do not conform to the legacy character
+	// requirements should be escaped when being scraped by a legacy prometheus
+	// system. If a system does not explicitly pass an escaping parameter in the
+	// Accept header, the default NameEscapingScheme will be used.
+	EscapingKey = "escaping"
+
+	// Possible values for Escaping Key.
+	AllowUTF8         = "allow-utf-8" // No escaping required.
+	EscapeUnderscores = "underscores"
+	EscapeDots        = "dots"
+	EscapeValues      = "values"
+)
+
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+	return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+	return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+	clone := make(Metric, len(m))
+	for k, v := range m {
+		clone[k] = v
+	}
+	return clone
+}
+
+func (m Metric) String() string {
+	metricName, hasName := m[MetricNameLabel]
+	numLabels := len(m) - 1
+	if !hasName {
+		numLabels = len(m)
+	}
+	labelStrings := make([]string, 0, numLabels)
+	for label, value := range m {
+		if label != MetricNameLabel {
+			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+		}
+	}
+
+	switch numLabels {
+	case 0:
+		if hasName {
+			return string(metricName)
+		}
+		return "{}"
+	default:
+		sort.Strings(labelStrings)
+		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+	}
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+	return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+	return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
+// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
+// selected.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [ValidationScheme.IsValidMetricName] instead.
+func IsValidMetricName(n LabelValue) bool {
+	return NameValidationScheme.IsValidMetricName(string(n))
+}
+
+// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
+// legacy validation scheme regardless of the value of NameValidationScheme.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [LegacyValidation.IsValidMetricName] instead.
+func IsValidLegacyMetricName(n string) bool {
+	return LegacyValidation.IsValidMetricName(n)
+}
+
+// EscapeMetricFamily escapes the given metric names and labels with the given
+// escaping scheme. Returns a new object that uses the same pointers to fields
+// when possible and creates new escaped versions so as not to mutate the
+// input.
+func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
+	if v == nil {
+		return nil
+	}
+
+	if scheme == NoEscaping {
+		return v
+	}
+
+	out := &dto.MetricFamily{
+		Help: v.Help,
+		Type: v.Type,
+		Unit: v.Unit,
+	}
+
+	// If the name is nil, copy as-is, don't try to escape.
+	if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
+		out.Name = v.Name
+	} else {
+		out.Name = proto.String(EscapeName(v.GetName(), scheme))
+	}
+	for _, m := range v.Metric {
+		if !metricNeedsEscaping(m) {
+			out.Metric = append(out.Metric, m)
+			continue
+		}
+
+		escaped := &dto.Metric{
+			Gauge:       m.Gauge,
+			Counter:     m.Counter,
+			Summary:     m.Summary,
+			Untyped:     m.Untyped,
+			Histogram:   m.Histogram,
+			TimestampMs: m.TimestampMs,
+		}
+
+		for _, l := range m.Label {
+			if l.GetName() == MetricNameLabel {
+				if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
+					escaped.Label = append(escaped.Label, l)
+					continue
+				}
+				escaped.Label = append(escaped.Label, &dto.LabelPair{
+					Name:  proto.String(MetricNameLabel),
+					Value: proto.String(EscapeName(l.GetValue(), scheme)),
+				})
+				continue
+			}
+			if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
+				escaped.Label = append(escaped.Label, l)
+				continue
+			}
+			escaped.Label = append(escaped.Label, &dto.LabelPair{
+				Name:  proto.String(EscapeName(l.GetName(), scheme)),
+				Value: l.Value,
+			})
+		}
+		out.Metric = append(out.Metric, escaped)
+	}
+	return out
+}
+
+func metricNeedsEscaping(m *dto.Metric) bool {
+	for _, l := range m.Label {
+		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
+			return true
+		}
+		if !IsValidLegacyMetricName(l.GetName()) {
+			return true
+		}
+	}
+	return false
+}
+
+// EscapeName escapes the incoming name according to the provided escaping
+// scheme. Depending on the rules of escaping, this may cause no change in the
+// string that is returned. (Especially NoEscaping, which by definition is a
+// noop). This function does not do any validation of the name.
+func EscapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	var escaped strings.Builder
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		if IsValidLegacyMetricName(name) {
+			return name
+		}
+		for i, b := range name {
+			if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else {
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	case DotsEscaping:
+		// Do not early return for legacy valid names, we still escape underscores.
+		for i, b := range name {
+			switch {
+			case b == '_':
+				escaped.WriteString("__")
+			case b == '.':
+				escaped.WriteString("_dot_")
+			case isValidLegacyRune(b, i):
+				escaped.WriteRune(b)
+			default:
+				escaped.WriteString("__")
+			}
+		}
+		return escaped.String()
+	case ValueEncodingEscaping:
+		if IsValidLegacyMetricName(name) {
+			return name
+		}
+		escaped.WriteString("U__")
+		for i, b := range name {
+			switch {
+			case b == '_':
+				escaped.WriteString("__")
+			case isValidLegacyRune(b, i):
+				escaped.WriteRune(b)
+			case !utf8.ValidRune(b):
+				escaped.WriteString("_FFFD_")
+			default:
+				escaped.WriteRune('_')
+				escaped.WriteString(strconv.FormatInt(int64(b), 16))
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	default:
+		panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+	}
+}
+
+// lower function taken from strconv.atoi.
+func lower(c byte) byte {
+	return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme if possible. Some schemes are partially or totally non-roundtrippable.
+// If any error is encountered, returns the original input.
+func UnescapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		// It is not possible to unescape from underscore replacement.
+		return name
+	case DotsEscaping:
+		name = strings.ReplaceAll(name, "_dot_", ".")
+		name = strings.ReplaceAll(name, "__", "_")
+		return name
+	case ValueEncodingEscaping:
+		escapedName, found := strings.CutPrefix(name, "U__")
+		if !found {
+			return name
+		}
+
+		var unescaped strings.Builder
+	TOP:
+		for i := 0; i < len(escapedName); i++ {
+			// All non-underscores are treated normally.
+			if escapedName[i] != '_' {
+				unescaped.WriteByte(escapedName[i])
+				continue
+			}
+			i++
+			if i >= len(escapedName) {
+				return name
+			}
+			// A double underscore is a single underscore.
+			if escapedName[i] == '_' {
+				unescaped.WriteByte('_')
+				continue
+			}
+			// We appear to be inside a UTF-8 escape sequence; process it.
+			var utf8Val uint
+			for j := 0; i < len(escapedName); j++ {
+				// This is too many characters for a utf8 value based on the MaxRune
+				// value of '\U0010FFFF'.
+				if j >= 6 {
+					return name
+				}
+				// Found a closing underscore, convert to a rune, check validity, and append.
+				if escapedName[i] == '_' {
+					utf8Rune := rune(utf8Val)
+					if !utf8.ValidRune(utf8Rune) {
+						return name
+					}
+					unescaped.WriteRune(utf8Rune)
+					continue TOP
+				}
+				r := lower(escapedName[i])
+				utf8Val *= 16
+				switch {
+				case r >= '0' && r <= '9':
+					utf8Val += uint(r) - '0'
+				case r >= 'a' && r <= 'f':
+					utf8Val += uint(r) - 'a' + 10
+				default:
+					return name
+				}
+				i++
+			}
+			// Didn't find closing underscore, invalid.
+			return name
+		}
+		return unescaped.String()
+	default:
+		panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+	}
+}
+
+func isValidLegacyRune(b rune, i int) bool {
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
+}
+
+func (e EscapingScheme) String() string {
+	switch e {
+	case NoEscaping:
+		return AllowUTF8
+	case UnderscoreEscaping:
+		return EscapeUnderscores
+	case DotsEscaping:
+		return EscapeDots
+	case ValueEncodingEscaping:
+		return EscapeValues
+	default:
+		panic(fmt.Sprintf("unknown format scheme %d", e))
+	}
+}
+
+func ToEscapingScheme(s string) (EscapingScheme, error) {
+	if s == "" {
+		return NoEscaping, errors.New("got empty string instead of escaping scheme")
+	}
+	switch s {
+	case AllowUTF8:
+		return NoEscaping, nil
+	case EscapeUnderscores:
+		return UnderscoreEscaping, nil
+	case EscapeDots:
+		return DotsEscaping, nil
+	case EscapeValues:
+		return ValueEncodingEscaping, nil
+	default:
+		return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
+	}
+}
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000..a7b9691
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000..dc8a002
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,142 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, labelName)
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, labels[labelName])
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	labelNames := make(LabelNames, 0, len(ls))
+	for labelName := range ls {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(ls[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a
+// faster, less allocation-heavy hash function, which is more prone to hash
+// collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	for labelName, labelValue := range ls {
+		sum := hashNew()
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(labelValue))
+		result ^= sum
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	sum := hashNew()
+	for _, label := range labels {
+		sum = hashAdd(sum, string(label))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[label]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000..8f91a97
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// Matcher describes a matcher that matches on the value of a given label.
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return errors.New("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate returns an error if any field of the matcher has an invalid value.
+func (m *Matcher) Validate() error {
+	if !m.Name.IsValid() {
+		return fmt.Errorf("invalid name %q", m.Name)
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return fmt.Errorf("invalid regular expression %q", m.Value)
+		}
+	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+		return fmt.Errorf("invalid value %q", m.Value)
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// ecosystem.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
+
+// Validate returns an error if any field of the silence has an invalid value.
+func (s *Silence) Validate() error {
+	if len(s.Matchers) == 0 {
+		return errors.New("at least one matcher required")
+	}
+	for _, m := range s.Matchers {
+		if err := m.Validate(); err != nil {
+			return fmt.Errorf("invalid matcher: %w", err)
+		}
+	}
+	if s.StartsAt.IsZero() {
+		return errors.New("start time missing")
+	}
+	if s.EndsAt.IsZero() {
+		return errors.New("end time missing")
+	}
+	if s.EndsAt.Before(s.StartsAt) {
+		return errors.New("start time must be before end time")
+	}
+	if s.CreatedBy == "" {
+		return errors.New("creator information missing")
+	}
+	if s.Comment == "" {
+		return errors.New("comment missing")
+	}
+	if s.CreatedAt.IsZero() {
+		return errors.New("creation timestamp missing")
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000..1730b0f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,359 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. It has to be
+	// at most time.Second for the conversions below to work.
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+	return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+	return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+	return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+	p := strings.Split(string(b), ".")
+	switch len(p) {
+	case 1:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		*t = Time(v * second)
+
+	case 2:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		v *= second
+
+		prec := dotPrecision - len(p[1])
+		if prec < 0 {
+			p[1] = p[1][:dotPrecision]
+		} else if prec > 0 {
+			p[1] += strings.Repeat("0", prec)
+		}
+
+		va, err := strconv.ParseInt(p[1], 10, 32)
+		if err != nil {
+			return err
+		}
+
+		// If the value was something like -0.1, the sign is lost during parsing
+		// because of the leading zero; this ensures that we capture it.
+		if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+			*t = Time(v+va) * -1
+		} else {
+			*t = Time(v + va)
+		}
+
+	default:
+		return fmt.Errorf("invalid time %q", string(b))
+	}
+	return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// Set implements pflag/flag.Value.
+func (d *Duration) Set(s string) error {
+	var err error
+	*d, err = ParseDuration(s)
+	return err
+}
+
+// Type implements pflag.Value.
+func (*Duration) Type() string {
+	return "duration"
+}
+
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+	pos  int
+	mult uint64
+}{
+	"ms": {7, uint64(time.Millisecond)},
+	"s":  {6, uint64(time.Second)},
+	"m":  {5, uint64(time.Minute)},
+	"h":  {4, uint64(time.Hour)},
+	"d":  {3, uint64(24 * time.Hour)},
+	"w":  {2, uint64(7 * 24 * time.Hour)},
+	"y":  {1, uint64(365 * 24 * time.Hour)},
+}
+
+// ParseDuration parses a string into a time.Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+// Negative durations are not supported.
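+// For example, "1h30m" parses to 90 minutes, while "1m1h" is rejected
+// because units must appear from largest to smallest (illustrative examples).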
+func ParseDuration(s string) (Duration, error) {
+	switch s {
+	case "0":
+		// Allow 0 without a unit.
+		return 0, nil
+	case "":
+		return 0, errors.New("empty duration string")
+	}
+
+	orig := s
+	var dur uint64
+	lastUnitPos := 0
+
+	for s != "" {
+		if !isdigit(s[0]) {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		// Consume [0-9]*
+		i := 0
+		for ; i < len(s) && isdigit(s[i]); i++ {
+		}
+		v, err := strconv.ParseUint(s[:i], 10, 0)
+		if err != nil {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		s = s[i:]
+
+		// Consume unit.
+		for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+		}
+		if i == 0 {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		u := s[:i]
+		s = s[i:]
+		unit, ok := unitMap[u]
+		if !ok {
+			return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+		}
+		if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		lastUnitPos = unit.pos
+		// Check if the provided duration overflows time.Duration (> ~ 290years).
+		if v > 1<<63/unit.mult {
+			return 0, errors.New("duration out of range")
+		}
+		dur += v * unit.mult
+		if dur > 1<<63-1 {
+			return 0, errors.New("duration out of range")
+		}
+	}
+
+	return Duration(dur), nil
+}
+
+// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
+func ParseDurationAllowNegative(s string) (Duration, error) {
+	if s == "" || s[0] != '-' {
+		return ParseDuration(s)
+	}
+
+	d, err := ParseDuration(s[1:])
+
+	return -d, err
+}
+
+func (d Duration) String() string {
+	var (
+		ms   = int64(time.Duration(d) / time.Millisecond)
+		r    = ""
+		sign = ""
+	)
+
+	if ms == 0 {
+		return "0s"
+	}
+
+	if ms < 0 {
+		sign, ms = "-", -ms
+	}
+
+	f := func(unit string, mult int64, exact bool) {
+		if exact && ms%mult != 0 {
+			return
+		}
+		if v := ms / mult; v > 0 {
+			r += fmt.Sprintf("%d%s", v, unit)
+			ms -= v * mult
+		}
+	}
+
+	// Only format years and weeks if the remainder is zero, as it is often
+	// easier to read 90d than 12w6d.
+	f("y", 1000*60*60*24*365, true)
+	f("w", 1000*60*60*24*7, true)
+
+	f("d", 1000*60*60*24, false)
+	f("h", 1000*60*60, false)
+	f("m", 1000*60, false)
+	f("s", 1000, false)
+	f("ms", 1, false)
+
+	return sign + r
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.String())
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *Duration) UnmarshalJSON(bytes []byte) error {
+	var s string
+	if err := json.Unmarshal(bytes, &s); err != nil {
+		return err
+	}
+	dur, err := ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = dur
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (d *Duration) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (d *Duration) UnmarshalText(text []byte) error {
+	var err error
+	*d, err = ParseDuration(string(text))
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+	return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	dur, err := ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = dur
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000..a9995a3
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,365 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which can appear in a real Sample and is thus not suitable for
+// signaling a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
+
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. Histogram == nil implies the Value
+// field is used, otherwise it should be ignored.
+type Sample struct {
+	Metric    Metric           `json:"metric"`
+	Value     SampleValue      `json:"value"`
+	Timestamp Time             `json:"timestamp"`
+	Histogram *SampleHistogram `json:"histogram"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+	if s == o {
+		return true
+	}
+
+	if !s.Metric.Equal(o.Metric) {
+		return false
+	}
+	if !s.Timestamp.Equal(o.Timestamp) {
+		return false
+	}
+	if s.Histogram != nil {
+		return s.Histogram.Equal(o.Histogram)
+	}
+	return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+	if s.Histogram != nil {
+		return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+			Timestamp: s.Timestamp,
+			Histogram: s.Histogram,
+		})
+	}
+	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+		Timestamp: s.Timestamp,
+		Value:     s.Value,
+	})
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+	if s.Histogram != nil {
+		v := struct {
+			Metric    Metric              `json:"metric"`
+			Histogram SampleHistogramPair `json:"histogram"`
+		}{
+			Metric: s.Metric,
+			Histogram: SampleHistogramPair{
+				Timestamp: s.Timestamp,
+				Histogram: s.Histogram,
+			},
+		}
+		return json.Marshal(&v)
+	}
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+	return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric    Metric              `json:"metric"`
+		Value     SamplePair          `json:"value"`
+		Histogram SampleHistogramPair `json:"histogram"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+		Histogram: SampleHistogramPair{
+			Timestamp: s.Timestamp,
+			Histogram: s.Histogram,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	if v.Histogram.Histogram != nil {
+		s.Timestamp = v.Histogram.Timestamp
+		s.Histogram = v.Histogram.Histogram
+	} else {
+		s.Timestamp = v.Value.Timestamp
+		s.Value = v.Value.Value
+	}
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric     Metric                `json:"metric"`
+	Values     []SamplePair          `json:"values"`
+	Histograms []SampleHistogramPair `json:"histograms"`
+}
+
+func (ss SampleStream) String() string {
+	valuesLength := len(ss.Values)
+	vals := make([]string, valuesLength+len(ss.Histograms))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	for i, v := range ss.Histograms {
+		vals[i+valuesLength] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+	switch {
+	case len(ss.Histograms) > 0 && len(ss.Values) > 0:
+		v := struct {
+			Metric     Metric                `json:"metric"`
+			Values     []SamplePair          `json:"values"`
+			Histograms []SampleHistogramPair `json:"histograms"`
+		}{
+			Metric:     ss.Metric,
+			Values:     ss.Values,
+			Histograms: ss.Histograms,
+		}
+		return json.Marshal(&v)
+	case len(ss.Histograms) > 0:
+		v := struct {
+			Metric     Metric                `json:"metric"`
+			Histograms []SampleHistogramPair `json:"histograms"`
+		}{
+			Metric:     ss.Metric,
+			Histograms: ss.Histograms,
+		}
+		return json.Marshal(&v)
+	default:
+		v := struct {
+			Metric Metric       `json:"metric"`
+			Values []SamplePair `json:"values"`
+		}{
+			Metric: ss.Metric,
+			Values: ss.Values,
+		}
+		return json.Marshal(&v)
+	}
+}
+
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric     Metric                `json:"metric"`
+		Values     []SamplePair          `json:"values"`
+		Histograms []SampleHistogramPair `json:"histograms"`
+	}{
+		Metric:     ss.Metric,
+		Values:     ss.Values,
+		Histograms: ss.Histograms,
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	ss.Metric = v.Metric
+	ss.Values = v.Values
+	ss.Histograms = v.Histograms
+
+	return nil
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, v})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+	var f string
+	v := [...]interface{}{&s.Timestamp, &f}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	value, err := strconv.ParseFloat(f, 64)
+	if err != nil {
+		return fmt.Errorf("error parsing sample value: %w", err)
+	}
+	s.Value = SampleValue(value)
+	return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+	Value     string `json:"value"`
+	Timestamp Time   `json:"timestamp"`
+}
+
+func (s *String) String() string {
+	return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+	return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+	v := [...]interface{}{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+	entries := make([]string, len(vec))
+	for i, s := range vec {
+		entries[i] = s.String()
+	}
+	return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int      { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+	switch {
+	case vec[i].Metric.Before(vec[j].Metric):
+		return true
+	case vec[j].Metric.Before(vec[i].Metric):
+		return false
+	case vec[i].Timestamp.Before(vec[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+	if len(vec) != len(o) {
+		return false
+	}
+
+	for i, sample := range vec {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int           { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
+
+func (m Matrix) String() string {
+	matCp := make(Matrix, len(m))
+	copy(matCp, m)
+	sort.Sort(matCp)
+
+	strs := make([]string, len(matCp))
+
+	for i, ss := range matCp {
+		strs[i] = ss.String()
+	}
+
+	return strings.Join(strs, "\n")
+}
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 0000000..6bfc757
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,99 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which can appear in a real SamplePair and is thus not suitable
+// for signaling a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return errors.New("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 0000000..91ce5b7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,179 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return errors.New("float value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = FloatString(f)
+	return nil
+}
+
+type HistogramBucket struct {
+	Boundaries int32
+	Lower      FloatString
+	Upper      FloatString
+	Count      FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+	b, err := json.Marshal(s.Boundaries)
+	if err != nil {
+		return nil, err
+	}
+	l, err := json.Marshal(s.Lower)
+	if err != nil {
+		return nil, err
+	}
+	u, err := json.Marshal(s.Upper)
+	if err != nil {
+		return nil, err
+	}
+	c, err := json.Marshal(s.Count)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+	tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+	wantLen := len(tmp)
+	if err := json.Unmarshal(buf, &tmp); err != nil {
+		return err
+	}
+	if gotLen := len(tmp); gotLen != wantLen {
+		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+	}
+	return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+	return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (s HistogramBucket) String() string {
+	var sb strings.Builder
+	lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3
+	upperInclusive := s.Boundaries == 0 || s.Boundaries == 3
+	if lowerInclusive {
+		sb.WriteRune('[')
+	} else {
+		sb.WriteRune('(')
+	}
+	fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper)
+	if upperInclusive {
+		sb.WriteRune(']')
+	} else {
+		sb.WriteRune(')')
+	}
+	fmt.Fprintf(&sb, ":%v", s.Count)
+	return sb.String()
+}
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, bucket := range s {
+		if !bucket.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+type SampleHistogram struct {
+	Count   FloatString      `json:"count"`
+	Sum     FloatString      `json:"sum"`
+	Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+	return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+	return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+	Timestamp Time
+	// Histogram should never be nil; it's only stored as a pointer for efficiency.
+	Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+	if s.Histogram == nil {
+		return nil, errors.New("histogram is nil")
+	}
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Histogram)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+	tmp := []interface{}{&s.Timestamp, &s.Histogram}
+	wantLen := len(tmp)
+	if err := json.Unmarshal(buf, &tmp); err != nil {
+		return err
+	}
+	if gotLen := len(tmp); gotLen != wantLen {
+		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+	}
+	if s.Histogram == nil {
+		return errors.New("histogram is null")
+	}
+	return nil
+}
+
+func (s SampleHistogramPair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+	return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 0000000..078910f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "<ValNone>":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+func (et ValueType) String() string {
+	switch et {
+	case ValNone:
+		return "<ValNone>"
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
new file mode 100644
index 0000000..7cc33ae
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.gitignore
@@ -0,0 +1,2 @@
+/testdata/fixtures/
+/fixtures
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
new file mode 100644
index 0000000..3c3bf91
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -0,0 +1,45 @@
+version: "2"
+linters:
+  enable:
+    - forbidigo
+    - godot
+    - misspell
+    - revive
+    - testifylint
+  settings:
+    forbidigo:
+      forbid:
+        - pattern: ^fmt\.Print.*$
+          msg: Do not commit print statements.
+    godot:
+      exclude:
+        # Ignore "See: URL".
+        - 'See:'
+      capital: true
+    misspell:
+      locale: US
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/prometheus/procfs
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..d325872
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+# Prometheus Community Code of Conduct
+
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000..853eb9d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,121 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
+
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+  addressing (with `@...`) a suitable maintainer of this repository (see
+  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+  This will avoid unnecessary work and surely give you and us a good deal
+  of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
+
+* Relevant coding style guidelines are the [Go Code Review
+  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+  and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+  Practices for Production
+  Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
+
+* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
+
+## Steps to Contribute
+
+Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue.
+
+Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
+
+For quickly compiling and testing your changes do:
+```
+make test         # Make sure all the tests pass before you commit and push :)
+```
+
+We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
+
+## Pull Request Checklist
+
+* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
+
+* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
+
+* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
+
+* Add tests relevant to the fixed bug or new feature.
+
+## Dependency management
+
+The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
+
+All dependencies are vendored in the `vendor/` directory.
+
+To add or update a new dependency, use the `go get` command:
+
+```bash
+# Pick the latest tagged release.
+go get example.com/some/module/pkg
+
+# Pick a specific version.
+go get example.com/some/module/pkg@vX.Y.Z
+```
+
+Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
+
+
+```bash
+# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
+GO111MODULE=on go mod tidy
+
+GO111MODULE=on go mod vendor
+```
+
+You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
+
+
+## API Implementation Guidelines
+
+### Naming and Documentation
+
+Public functions and structs should normally be named according to the file(s) being read and parsed.  For example, 
+the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`.  In addition, the godoc for each public function
+should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s).
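+
+For instance, the godoc for such a function might look like this (illustrative wording only):
+
+```
+// BuddyInfo reads and parses /proc/buddyinfo.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// for a description of the file's contents.
+```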
+
+### Reading vs. Parsing
+
+Most functionality in this library consists of reading files and then parsing the text into structured data.  In most
+cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and 
+a private `parseThing(r Reader)` function.  This provides a logical separation and allows parsing to be tested
+directly without the need to read from the filesystem.  Using a `Reader` argument is preferred over other data types
+such as `string` or `*File` because it provides the most flexibility regarding the data source.  When a set of files 
+in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
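+
+A minimal sketch of this split might look like the following. The `Thing` type, the `/proc/thing` path, and its `total:` line are invented here purely to illustrate the pattern (inside `package procfs`); they are not part of the library.
+
+```
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Thing models the (hypothetical) contents of /proc/thing.
+type Thing struct {
+	Total uint64
+}
+
+// Thing reads /proc/thing in one operation and hands the raw bytes to the parser.
+func (fs FS) Thing() (Thing, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("thing"))
+	if err != nil {
+		return Thing{}, err
+	}
+	return parseThing(bytes.NewReader(data))
+}
+
+// parseThing only parses; it never touches the filesystem, so tests can feed
+// it a strings.Reader containing fixture data.
+func parseThing(r io.Reader) (Thing, error) {
+	var t Thing
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		if len(fields) == 2 && fields[0] == "total:" {
+			v, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				return Thing{}, err
+			}
+			t.Total = v
+		}
+	}
+	return t, s.Err()
+}
+```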
+
+### /proc and /sys filesystem I/O 
+
+The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O.  
+Many of the files are changing continuously and the data being read can in some cases change between subsequent 
+reads in the same file.  Also, most of the files are relatively small (less than a few KBs), and system calls
+to the `stat` function will often return the wrong size.  Therefore, for most files it's recommended to read the 
+full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
+the file.
+
+Note that parsing the file's contents can still be performed one line at a time.  This is done by first reading 
+the full file, and then using a scanner on the `[]byte` or `string` containing the data.
+
+```
+    data, err := util.ReadFileNoStat("/proc/cpuinfo")
+    if err != nil {
+        return err
+    }
+    reader := bytes.NewReader(data)
+    scanner := bufio.NewScanner(reader)
+```
+
+The `/sys` filesystem contains many very small files which contain only a single numeric or text value.  These files
+can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
+not bother to check the size of the file before reading.
+```
+    data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
+```
+
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
new file mode 100644
index 0000000..e00f3b3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -0,0 +1,3 @@
+* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
+* Paul Gier <paulgier@gmail.com> @pgier
+* Ben Kochie <superq@gmail.com> @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 0000000..7edfe4d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,31 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include Makefile.common
+
+%/.unpacked: %.ttar
+	@echo ">> extracting fixtures $*"
+	./ttar -C $(dir $*) -x -f $*.ttar
+	touch $@
+
+fixtures: testdata/fixtures/.unpacked
+
+update_fixtures:
+	rm -vf testdata/fixtures/.unpacked
+	./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
+
+.PHONY: build
+build:
+
+.PHONY: test
+test: testdata/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
new file mode 100644
index 0000000..0ed55c2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -0,0 +1,283 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# A common Makefile that includes rules to be reused in different prometheus projects.
+# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
+
+# Example usage :
+# Create the main Makefile in the root project directory.
+# include Makefile.common
+# customTarget:
+# 	@echo ">> Running customTarget"
+#
+
+# Ensure GOBIN is not set during build so that promu is installed to the correct path
+unexport GOBIN
+
+GO           ?= go
+GOFMT        ?= $(GO)fmt
+FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+GOOPTS       ?=
+GOHOSTOS     ?= $(shell $(GO) env GOHOSTOS)
+GOHOSTARCH   ?= $(shell $(GO) env GOHOSTARCH)
+
+GO_VERSION        ?= $(shell $(GO) version)
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+PRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
+
+PROMU        := $(FIRST_GOPATH)/bin/promu
+pkgs          = ./...
+
+ifeq (arm, $(GOHOSTARCH))
+	GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
+	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
+else
+	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
+endif
+
+GOTEST := $(GO) test
+GOTEST_DIR :=
+ifneq ($(CIRCLE_JOB),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
+	GOTEST_DIR := test-results
+	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
+endif
+endif
+
+PROMU_VERSION ?= 0.17.0
+PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+
+SKIP_GOLANGCI_LINT :=
+GOLANGCI_LINT :=
+GOLANGCI_LINT_OPTS ?=
+GOLANGCI_LINT_VERSION ?= v2.0.2
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
+# windows isn't included here because of the path separator being different.
+ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
+	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
+		# If we're in CI and there is an Actions file, that means the linter
+		# is being run in Actions, so we don't need to run it here.
+		ifneq (,$(SKIP_GOLANGCI_LINT))
+			GOLANGCI_LINT :=
+		else ifeq (,$(CIRCLE_JOB))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		endif
+	endif
+endif
+
+PREFIX                  ?= $(shell pwd)
+BIN_DIR                 ?= $(shell pwd)
+DOCKER_IMAGE_TAG        ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
+DOCKERFILE_PATH         ?= ./Dockerfile
+DOCKERBUILD_CONTEXT     ?= ./
+DOCKER_REPO             ?= prom
+
+DOCKER_ARCHS            ?= amd64
+
+BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
+PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
+TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
+ifeq ($(GOHOSTARCH),amd64)
+        ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
+                # Only supported on amd64
+                test-flags := -race
+        endif
+endif
+
+# This rule is used to forward a target like "build" to "common-build".  This
+# allows a new "build" target to be defined in a Makefile that includes this
+# one, overriding "common-build" without override warnings.
+%: common-% ;
+
+.PHONY: common-all
+common-all: precheck style check_license lint yamllint unused build test
+
+.PHONY: common-style
+common-style:
+	@echo ">> checking code style"
+	@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
+	if [ -n "$${fmtRes}" ]; then \
+		echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
+		echo "Please ensure you are using $$($(GO) version) for formatting code."; \
+		exit 1; \
+	fi
+
+.PHONY: common-check_license
+common-check_license:
+	@echo ">> checking license header"
+	@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
+               awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
+       done); \
+       if [ -n "$${licRes}" ]; then \
+               echo "license header checking failed:"; echo "$${licRes}"; \
+               exit 1; \
+       fi
+
+.PHONY: common-deps
+common-deps:
+	@echo ">> getting dependencies"
+	$(GO) mod download
+
+.PHONY: update-go-deps
+update-go-deps:
+	@echo ">> updating Go dependencies"
+	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
+		$(GO) get -d $$m; \
+	done
+	$(GO) mod tidy
+
+.PHONY: common-test-short
+common-test-short: $(GOTEST_DIR)
+	@echo ">> running short tests"
+	$(GOTEST) -short $(GOOPTS) $(pkgs)
+
+.PHONY: common-test
+common-test: $(GOTEST_DIR)
+	@echo ">> running all tests"
+	$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+
+$(GOTEST_DIR):
+	@mkdir -p $@
+
+.PHONY: common-format
+common-format:
+	@echo ">> formatting code"
+	$(GO) fmt $(pkgs)
+
+.PHONY: common-vet
+common-vet:
+	@echo ">> vetting code"
+	$(GO) vet $(GOOPTS) $(pkgs)
+
+.PHONY: common-lint
+common-lint: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+	@echo ">> running golangci-lint"
+	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+	@echo ">> running golangci-lint fix"
+	$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
+.PHONY: common-yamllint
+common-yamllint:
+	@echo ">> running yamllint on all YAML files in the repository"
+ifeq (, $(shell command -v yamllint 2> /dev/null))
+	@echo "yamllint not installed so skipping"
+else
+	yamllint .
+endif
+
+# For backward-compatibility.
+.PHONY: common-staticcheck
+common-staticcheck: lint
+
+.PHONY: common-unused
+common-unused:
+	@echo ">> running check for unused/missing packages in go.mod"
+	$(GO) mod tidy
+	@git diff --exit-code -- go.sum go.mod
+
+.PHONY: common-build
+common-build: promu
+	@echo ">> building binaries"
+	$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+
+.PHONY: common-tarball
+common-tarball: promu
+	@echo ">> building release tarball"
+	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+	@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
+.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
+common-docker: $(BUILD_DOCKER_ARCHS)
+$(BUILD_DOCKER_ARCHS): common-docker-%:
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
+		-f $(DOCKERFILE_PATH) \
+		--build-arg ARCH="$*" \
+		--build-arg OS="linux" \
+		$(DOCKERBUILD_CONTEXT)
+
+.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
+common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
+$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
+
+DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
+.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
+common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
+$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+
+.PHONY: common-docker-manifest
+common-docker-manifest:
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
+
+.PHONY: promu
+promu: $(PROMU)
+
+$(PROMU):
+	$(eval PROMU_TMP := $(shell mktemp -d))
+	curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
+	mkdir -p $(FIRST_GOPATH)/bin
+	cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
+	rm -r $(PROMU_TMP)
+
+.PHONY: proto
+proto:
+	@echo ">> generating code from proto files"
+	@./scripts/genproto.sh
+
+ifdef GOLANGCI_LINT
+$(GOLANGCI_LINT):
+	mkdir -p $(FIRST_GOPATH)/bin
+	curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
+		| sed -e '/install -d/d' \
+		| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
+endif
+
+.PHONY: precheck
+precheck::
+
+define PRECHECK_COMMAND_template =
+precheck:: $(1)_precheck
+
+PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
+.PHONY: $(1)_precheck
+$(1)_precheck:
+	@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
+		echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
+		exit 1; \
+	fi
+endef
+
+govulncheck: install-govulncheck
+	govulncheck ./...
+
+install-govulncheck:
+	command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000..53c5e9a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000..0718239
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,61 @@
+# procfs
+
+This package provides functions to retrieve system, kernel, and process
+metrics from the pseudo-filesystems /proc and /sys.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs)
+[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
+
+## Usage
+
+The procfs library is organized by packages based on whether the gathered data is coming from
+/proc, /sys, or both.  Each package contains an `FS` type which represents the path to either /proc, 
+/sys, or both.  For example, cpu statistics are gathered from
+`/proc/stat` and are available via the root procfs package.  First, the proc filesystem mount
+point is initialized, and then the stat information is read.
+
+```go
+fs, err := procfs.NewFS("/proc")
+stats, err := fs.Stat()
+```
+
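+For completeness, a minimal self-contained sketch of the same calls, with error
+handling added (this program is illustrative and not part of the upstream
+example), might look like:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/prometheus/procfs"
+)
+
+func main() {
+	// Point the FS at the default /proc mount point.
+	fs, err := procfs.NewFS("/proc")
+	if err != nil {
+		log.Fatalf("could not mount procfs: %s", err)
+	}
+
+	// Read and parse /proc/stat.
+	stats, err := fs.Stat()
+	if err != nil {
+		log.Fatalf("could not read /proc/stat: %s", err)
+	}
+
+	fmt.Printf("%+v\n", stats)
+}
+```
+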
+Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems.
+
+```go
+    fs, err := blockdevice.NewFS("/proc", "/sys")
+    stats, err := fs.ProcDiskstats()
+```
+
+## Package Organization
+
+The packages in this project are organized according to (1) whether the data comes from the `/proc` or
+`/sys` filesystem and (2) the type of information being retrieved.  For example, most process information
+can be gathered from the functions in the root `procfs` package.  Information about block devices such as disk drives
+is available in the `blockdevice` sub-package.
+
+## Building and Testing
+
+The procfs library is intended to be built as part of another application, so there are no distributable binaries.  
+However, most of the API includes unit tests which can be run with `make test`.
+
+### Updating Test Fixtures
+
+The procfs library includes a set of test fixtures which include many example files from
+the `/proc` and `/sys` filesystems.  These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
+which is extracted automatically during testing.  To add/update the test fixtures, first
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
+
+```bash
+rm -rf testdata/fixtures
+make test
+```
+
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory.  When
+the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
+based on the updated `fixtures` directory.  And finally, verify the changes using
+`git diff testdata/fixtures.ttar`.
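+
+As a sketch of the full workflow described above (these are the same commands
+named in this section, collected in order):
+
+```bash
+# Re-extract the fixtures from the ttar archive.
+rm -rf testdata/fixtures
+make test
+
+# ...edit files under testdata/fixtures/ as needed...
+
+# Repack the archive and review the resulting change.
+make update_fixtures
+git diff testdata/fixtures.ttar
+```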
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
new file mode 100644
index 0000000..fed02d8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/SECURITY.md
@@ -0,0 +1,6 @@
+# Reporting a security issue
+
+The Prometheus security policy, including how to report vulnerabilities, can be
+found here:
+
+<https://prometheus.io/docs/operating/security/>
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
new file mode 100644
index 0000000..2e53344
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Learned from include/uapi/linux/if_arp.h.
+const (
+	// Completed entry (ha valid).
+	ATFComplete = 0x02
+	// Permanent entry.
+	ATFPermanent = 0x04
+	// Publish entry.
+	ATFPublish = 0x08
+	// Has requested trailers.
+	ATFUseTrailers = 0x10
+	// Obsoleted: Want to use a netmask (only for proxy entries).
+	ATFNetmask = 0x20
+	// Don't answer these addresses.
+	ATFDontPublish = 0x40
+)
+
+// ARPEntry contains a single row of the columnar data represented in
+// /proc/net/arp.
+type ARPEntry struct {
+	// IP address
+	IPAddr net.IP
+	// MAC address
+	HWAddr net.HardwareAddr
+	// Name of the device
+	Device string
+	// Flags
+	Flags byte
+}
+
+// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
+// and then returns a slice of ARPEntry values.
+func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
+	data, err := os.ReadFile(fs.proc.Path("net/arp"))
+	if err != nil {
+		return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
+	}
+
+	return parseARPEntries(data)
+}
+
+func parseARPEntries(data []byte) ([]ARPEntry, error) {
+	lines := strings.Split(string(data), "\n")
+	entries := make([]ARPEntry, 0)
+	var err error
+	const (
+		expectedDataWidth   = 6
+		expectedHeaderWidth = 9
+	)
+	for _, line := range lines {
+		columns := strings.Fields(line)
+		width := len(columns)
+
+		if width == expectedHeaderWidth || width == 0 {
+			continue
+		} else if width == expectedDataWidth {
+			entry, err := parseARPEntry(columns)
+			if err != nil {
+				return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
+			}
+			entries = append(entries, entry)
+		} else {
+			return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
+		}
+
+	}
+
+	return entries, err
+}
+
+func parseARPEntry(columns []string) (ARPEntry, error) {
+	entry := ARPEntry{Device: columns[5]}
+	ip := net.ParseIP(columns[0])
+	entry.IPAddr = ip
+
+	if mac, err := net.ParseMAC(columns[3]); err == nil {
+		entry.HWAddr = mac
+	} else {
+		return ARPEntry{}, err
+	}
+
+	if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
+		entry.Flags = byte(flags)
+	} else {
+		return ARPEntry{}, err
+	}
+
+	return entry, nil
+}
+
+// IsComplete returns true if ARP entry is marked with complete flag.
+func (entry *ARPEntry) IsComplete() bool {
+	return entry.Flags&ATFComplete != 0
+}
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 0000000..8380750
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data is comprised of an array of free fragments of each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+	Node  string
+	Zone  string
+	Sizes []float64
+}
+
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+	file, err := os.Open(fs.proc.Path("buddyinfo"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+	var (
+		buddyInfo   = []BuddyInfo{}
+		scanner     = bufio.NewScanner(r)
+		bucketCount = -1
+	)
+
+	for scanner.Scan() {
+		var err error
+		line := scanner.Text()
+		parts := strings.Fields(line)
+
+		if len(parts) < 4 {
+			return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
+		}
+
+		node := strings.TrimSuffix(parts[1], ",")
+		zone := strings.TrimSuffix(parts[3], ",")
+		arraySize := len(parts[4:])
+
+		if bucketCount == -1 {
+			bucketCount = arraySize
+		} else {
+			if bucketCount != arraySize {
+				return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
+			}
+		}
+
+		sizes := make([]float64, arraySize)
+		for i := 0; i < arraySize; i++ {
+			sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+			if err != nil {
+				return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
+			}
+		}
+
+		buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+	}
+
+	return buddyInfo, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go
new file mode 100644
index 0000000..bf4f3b4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cmdline.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CmdLine returns the command line of the kernel.
+func (fs FS) CmdLine() ([]string, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
+	if err != nil {
+		return nil, err
+	}
+
+	return strings.Fields(string(data)), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
new file mode 100644
index 0000000..f0950bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -0,0 +1,519 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
+type CPUInfo struct {
+	Processor       uint
+	VendorID        string
+	CPUFamily       string
+	Model           string
+	ModelName       string
+	Stepping        string
+	Microcode       string
+	CPUMHz          float64
+	CacheSize       string
+	PhysicalID      string
+	Siblings        uint
+	CoreID          string
+	CPUCores        uint
+	APICID          string
+	InitialAPICID   string
+	FPU             string
+	FPUException    string
+	CPUIDLevel      uint
+	WP              string
+	Flags           []string
+	Bugs            []string
+	BogoMips        float64
+	CLFlushSize     uint
+	CacheAlignment  uint
+	AddressSizes    string
+	PowerManagement string
+}
+
+var (
+	cpuinfoClockRegexp          = regexp.MustCompile(`([\d.]+)`)
+	cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
+)
+
+// CPUInfo returns information about current system CPUs.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) CPUInfo() ([]CPUInfo, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCPUInfo(data)
+}
+
+func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	// find the first "processor" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: Cannot parse  line: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	v, err := strconv.ParseUint(field[1], 0, 32)
+	if err != nil {
+		return nil, err
+	}
+	firstcpu := CPUInfo{Processor: uint(v)}
+	cpuinfo := []CPUInfo{firstcpu}
+	i := 0
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			i++
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Processor = uint(v)
+		case "vendor", "vendor_id":
+			cpuinfo[i].VendorID = field[1]
+		case "cpu family":
+			cpuinfo[i].CPUFamily = field[1]
+		case "model":
+			cpuinfo[i].Model = field[1]
+		case "model name":
+			cpuinfo[i].ModelName = field[1]
+		case "stepping":
+			cpuinfo[i].Stepping = field[1]
+		case "microcode":
+			cpuinfo[i].Microcode = field[1]
+		case "cpu MHz":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUMHz = v
+		case "cache size":
+			cpuinfo[i].CacheSize = field[1]
+		case "physical id":
+			cpuinfo[i].PhysicalID = field[1]
+		case "siblings":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Siblings = uint(v)
+		case "core id":
+			cpuinfo[i].CoreID = field[1]
+		case "cpu cores":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUCores = uint(v)
+		case "apicid":
+			cpuinfo[i].APICID = field[1]
+		case "initial apicid":
+			cpuinfo[i].InitialAPICID = field[1]
+		case "fpu":
+			cpuinfo[i].FPU = field[1]
+		case "fpu_exception":
+			cpuinfo[i].FPUException = field[1]
+		case "cpuid level":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUIDLevel = uint(v)
+		case "wp":
+			cpuinfo[i].WP = field[1]
+		case "flags":
+			cpuinfo[i].Flags = strings.Fields(field[1])
+		case "bugs":
+			cpuinfo[i].Bugs = strings.Fields(field[1])
+		case "bogomips":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].BogoMips = v
+		case "clflush size":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CLFlushSize = uint(v)
+		case "cache_alignment":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CacheAlignment = uint(v)
+		case "address sizes":
+			cpuinfo[i].AddressSizes = field[1]
+		case "power management":
+			cpuinfo[i].PowerManagement = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	firstLine := firstNonEmptyLine(scanner)
+	match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
+	if !match || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
+
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	featuresLine := ""
+	commonCPUInfo := CPUInfo{}
+	i := 0
+	if strings.TrimSpace(field[0]) == "Processor" {
+		commonCPUInfo = CPUInfo{ModelName: field[1]}
+		i = -1
+	} else {
+		v, err := strconv.ParseUint(field[1], 0, 32)
+		if err != nil {
+			return nil, err
+		}
+		firstcpu := CPUInfo{Processor: uint(v)}
+		cpuinfo = []CPUInfo{firstcpu}
+	}
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
+			i++
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Processor = uint(v)
+		case "BogoMIPS":
+			if i == -1 {
+				cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
+				i++
+				cpuinfo[i].Processor = 0
+			}
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].BogoMips = v
+		case "Features":
+			featuresLine = line
+		case "model name":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	fields := strings.SplitN(featuresLine, ": ", 2)
+	for i := range cpuinfo {
+		cpuinfo[i].Flags = strings.Fields(fields[1])
+	}
+	return cpuinfo, nil
+
+}
+
+func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	commonCPUInfo := CPUInfo{VendorID: field[1]}
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "bogomips per cpu":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			commonCPUInfo.BogoMips = v
+		case "features":
+			commonCPUInfo.Flags = strings.Fields(field[1])
+		}
+		if strings.HasPrefix(line, "processor") {
+			match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
+			if len(match) < 2 {
+				return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+			}
+			cpu := commonCPUInfo
+			v, err := strconv.ParseUint(match[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpu.Processor = uint(v)
+			cpuinfo = append(cpuinfo, cpu)
+		}
+		if strings.HasPrefix(line, "cpu number") {
+			break
+		}
+	}
+
+	i := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "cpu number":
+			i++
+		case "cpu MHz dynamic":
+			clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
+			v, err := strconv.ParseFloat(clock, 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUMHz = v
+		case "physical id":
+			cpuinfo[i].PhysicalID = field[1]
+		case "core id":
+			cpuinfo[i].CoreID = field[1]
+		case "cpu cores":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUCores = uint(v)
+		case "siblings":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Siblings = uint(v)
+		}
+	}
+
+	return cpuinfo, nil
+}
+
+func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	// find the first "processor" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	systemType := field[1]
+
+	i := 0
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+			cpuinfo[i].VendorID = systemType
+		case "cpu model":
+			cpuinfo[i].ModelName = field[1]
+		case "BogoMIPS":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].BogoMips = v
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	// find the first "processor" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	systemType := field[1]
+	i := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+			cpuinfo[i].VendorID = systemType
+		case "CPU Family":
+			cpuinfo[i].CPUFamily = field[1]
+		case "Model Name":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	v, err := strconv.ParseUint(field[1], 0, 32)
+	if err != nil {
+		return nil, err
+	}
+	firstcpu := CPUInfo{Processor: uint(v)}
+	cpuinfo := []CPUInfo{firstcpu}
+	i := 0
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			i++
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Processor = uint(v)
+		case "cpu":
+			cpuinfo[i].VendorID = field[1]
+		case "clock":
+			clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
+			v, err := strconv.ParseFloat(clock, 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUMHz = v
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	v, err := strconv.ParseUint(field[1], 0, 32)
+	if err != nil {
+		return nil, err
+	}
+	firstcpu := CPUInfo{Processor: uint(v)}
+	cpuinfo := []CPUInfo{firstcpu}
+	i := 0
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+		case "hart":
+			cpuinfo[i].CoreID = field[1]
+		case "isa":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
+	return nil, errors.New("not implemented")
+}
+
+// firstNonEmptyLine advances the scanner to the first non-empty line
+// and returns the contents of that line.
+func firstNonEmptyLine(scanner *bufio.Scanner) string {
+	for scanner.Scan() {
+		line := scanner.Text()
+		if strings.TrimSpace(line) != "" {
+			return line
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
new file mode 100644
index 0000000..64cfd53
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (arm || arm64)
+// +build linux
+// +build arm arm64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoARM
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 0000000..d88442f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
new file mode 100644
index 0000000..c11207f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (mips || mipsle || mips64 || mips64le)
+// +build linux
+// +build mips mipsle mips64 mips64le
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
new file mode 100644
index 0000000..a6b2b31
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoDummy
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
new file mode 100644
index 0000000..003bc2a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (ppc64 || ppc64le)
+// +build linux
+// +build ppc64 ppc64le
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoPPC
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
new file mode 100644
index 0000000..1c9b731
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (riscv || riscv64)
+// +build linux
+// +build riscv riscv64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoRISCV
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
new file mode 100644
index 0000000..fa3686bc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoS390X
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
new file mode 100644
index 0000000..a0ef555
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (386 || amd64)
+// +build linux
+// +build 386 amd64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoX86
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
new file mode 100644
index 0000000..5f2a37a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -0,0 +1,154 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Crypto holds info parsed from /proc/crypto.
+type Crypto struct {
+	Alignmask   *uint64
+	Async       bool
+	Blocksize   *uint64
+	Chunksize   *uint64
+	Ctxsize     *uint64
+	Digestsize  *uint64
+	Driver      string
+	Geniv       string
+	Internal    string
+	Ivsize      *uint64
+	Maxauthsize *uint64
+	MaxKeysize  *uint64
+	MinKeysize  *uint64
+	Module      string
+	Name        string
+	Priority    *int64
+	Refcnt      *int64
+	Seedsize    *uint64
+	Selftest    string
+	Type        string
+	Walksize    *uint64
+}
+
+// Crypto parses a crypto file (/proc/crypto) and returns a slice of
+// structs containing the relevant info.  More information available here:
+// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
+func (fs FS) Crypto() ([]Crypto, error) {
+	path := fs.proc.Path("crypto")
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
+
+	}
+
+	crypto, err := parseCrypto(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
+	}
+
+	return crypto, nil
+}
+
+// parseCrypto parses a /proc/crypto stream into Crypto elements.
+func parseCrypto(r io.Reader) ([]Crypto, error) {
+	var out []Crypto
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		text := s.Text()
+		switch {
+		case strings.HasPrefix(text, "name"):
+			// Each crypto element begins with its name.
+			out = append(out, Crypto{})
+		case text == "":
+			continue
+		}
+
+		kv := strings.Split(text, ":")
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
+		}
+
+		k := strings.TrimSpace(kv[0])
+		v := strings.TrimSpace(kv[1])
+
+		// Parse the key/value pair into the currently focused element.
+		c := &out[len(out)-1]
+		if err := c.parseKV(k, v); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+// parseKV parses a key/value pair into the appropriate field of c.
+func (c *Crypto) parseKV(k, v string) error {
+	vp := util.NewValueParser(v)
+
+	switch k {
+	case "async":
+		// Interpret literal yes as true.
+		c.Async = v == "yes"
+	case "blocksize":
+		c.Blocksize = vp.PUInt64()
+	case "chunksize":
+		c.Chunksize = vp.PUInt64()
+	case "digestsize":
+		c.Digestsize = vp.PUInt64()
+	case "driver":
+		c.Driver = v
+	case "geniv":
+		c.Geniv = v
+	case "internal":
+		c.Internal = v
+	case "ivsize":
+		c.Ivsize = vp.PUInt64()
+	case "maxauthsize":
+		c.Maxauthsize = vp.PUInt64()
+	case "max keysize":
+		c.MaxKeysize = vp.PUInt64()
+	case "min keysize":
+		c.MinKeysize = vp.PUInt64()
+	case "module":
+		c.Module = v
+	case "name":
+		c.Name = v
+	case "priority":
+		c.Priority = vp.PInt64()
+	case "refcnt":
+		c.Refcnt = vp.PInt64()
+	case "seedsize":
+		c.Seedsize = vp.PUInt64()
+	case "selftest":
+		c.Selftest = v
+	case "type":
+		c.Type = v
+	case "walksize":
+		c.Walksize = vp.PUInt64()
+	}
+
+	return vp.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000..f9d961e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"log"
+//
+//		"github.com/prometheus/procfs"
+//	)
+//
+//	func main() {
+//		p, err := procfs.Self()
+//		if err != nil {
+//			log.Fatalf("could not get process: %s", err)
+//		}
+//
+//		stat, err := p.Stat()
+//		if err != nil {
+//			log.Fatalf("could not get process stat: %s", err)
+//		}
+//
+//		fmt.Printf("command:  %s\n", stat.Comm)
+//		fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+//		fmt.Printf("vsize:    %dB\n", stat.VirtualMemory())
+//		fmt.Printf("rss:      %dB\n", stat.ResidentMemory())
+//	}
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000..9bdaccc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,56 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"github.com/prometheus/procfs/internal/fs"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS struct {
+	proc   fs.FS
+	isReal bool
+}
+
+const (
+	// DefaultMountPoint is the common mount point of the proc filesystem.
+	DefaultMountPoint = fs.DefaultProcMountPoint
+
+	// SectorSize represents the size of a sector in bytes.
+	// It is specific to Linux block I/O operations.
+	SectorSize = 512
+)
+
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+	return NewFS(DefaultMountPoint)
+}
+
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
+func NewFS(mountPoint string) (FS, error) {
+	fs, err := fs.NewFS(mountPoint)
+	if err != nil {
+		return FS{}, err
+	}
+
+	isReal, err := isRealProc(mountPoint)
+	if err != nil {
+		return FS{}, err
+	}
+
+	return FS{fs, isReal}, nil
+}
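A minimal sketch of the constructors above, assuming /proc is mounted; the /host/proc alternative mentioned in the comment is only an illustration of NewFS with a custom mount point.

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		// NewDefaultFS uses /proc; NewFS("/host/proc") would target a bind-mounted
		// host procfs instead.
		fs, err := procfs.NewDefaultFS()
		if err != nil {
			log.Fatalf("could not open proc: %s", err)
		}

		// Any FS method from this package can now be called, e.g. LoadAvg below.
		load, err := fs.LoadAvg()
		if err != nil {
			log.Fatalf("could not read loadavg: %s", err)
		}
		fmt.Printf("load1: %.2f\n", load.Load1)
	}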
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
new file mode 100644
index 0000000..1b5bdbd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !freebsd && !linux
+// +build !freebsd,!linux
+
+package procfs
+
+// isRealProc returns true on platforms whose Statfs_t struct has no Type
+// field, so the mount point cannot be verified and is assumed to be a real procfs.
+func isRealProc(_ string) (bool, error) {
+	return true, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
new file mode 100644
index 0000000..80df79c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build freebsd || linux
+// +build freebsd linux
+
+package procfs
+
+import (
+	"syscall"
+)
+
+// isRealProc determines whether supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+	stat := syscall.Statfs_t{}
+	err := syscall.Statfs(mountPoint, &stat)
+	if err != nil {
+		return false, err
+	}
+
+	// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+	return stat.Type == 0x9fa0, nil
+}
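The PROC_SUPER_MAGIC check can be reproduced outside the package; this sketch assumes a Linux build and only restates the syscall used by isRealProc above.

	//go:build linux

	package main

	import (
		"fmt"
		"log"
		"syscall"
	)

	func main() {
		var stat syscall.Statfs_t
		if err := syscall.Statfs("/proc", &stat); err != nil {
			log.Fatalf("statfs failed: %s", err)
		}
		// 0x9fa0 is PROC_SUPER_MAGIC; a match means /proc is a real procfs mount.
		fmt.Println("real procfs:", stat.Type == 0x9fa0)
	}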
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
new file mode 100644
index 0000000..7db8633
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -0,0 +1,422 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Fscacheinfo represents fscache statistics.
+type Fscacheinfo struct {
+	// Number of index cookies allocated
+	IndexCookiesAllocated uint64
+	// Number of data storage cookies allocated
+	DataStorageCookiesAllocated uint64
+	// Number of special cookies allocated
+	SpecialCookiesAllocated uint64
+	// Number of objects allocated
+	ObjectsAllocated uint64
+	// Number of object allocation failures
+	ObjectAllocationsFailure uint64
+	// Number of objects that reached the available state
+	ObjectsAvailable uint64
+	// Number of objects that reached the dead state
+	ObjectsDead uint64
+	// Number of objects that didn't have a coherency check
+	ObjectsWithoutCoherencyCheck uint64
+	// Number of objects that passed a coherency check
+	ObjectsWithCoherencyCheck uint64
+	// Number of objects that needed a coherency data update
+	ObjectsNeedCoherencyCheckUpdate uint64
+	// Number of objects that were declared obsolete
+	ObjectsDeclaredObsolete uint64
+	// Number of pages marked as being cached
+	PagesMarkedAsBeingCached uint64
+	// Number of uncache page requests seen
+	UncachePagesRequestSeen uint64
+	// Number of acquire cookie requests seen
+	AcquireCookiesRequestSeen uint64
+	// Number of acq reqs given a NULL parent
+	AcquireRequestsWithNullParent uint64
+	// Number of acq reqs rejected due to no cache available
+	AcquireRequestsRejectedNoCacheAvailable uint64
+	// Number of acq reqs succeeded
+	AcquireRequestsSucceeded uint64
+	// Number of acq reqs rejected due to error
+	AcquireRequestsRejectedDueToError uint64
+	// Number of acq reqs failed on ENOMEM
+	AcquireRequestsFailedDueToEnomem uint64
+	// Number of lookup calls made on cache backends
+	LookupsNumber uint64
+	// Number of negative lookups made
+	LookupsNegative uint64
+	// Number of positive lookups made
+	LookupsPositive uint64
+	// Number of objects created by lookup
+	ObjectsCreatedByLookup uint64
+	// Number of lookups timed out and requeued
+	LookupsTimedOutAndRequed uint64
+	InvalidationsNumber      uint64
+	InvalidationsRunning     uint64
+	// Number of update cookie requests seen
+	UpdateCookieRequestSeen uint64
+	// Number of upd reqs given a NULL parent
+	UpdateRequestsWithNullParent uint64
+	// Number of upd reqs granted CPU time
+	UpdateRequestsRunning uint64
+	// Number of relinquish cookie requests seen
+	RelinquishCookiesRequestSeen uint64
+	// Number of rlq reqs given a NULL parent
+	RelinquishCookiesWithNullParent uint64
+	// Number of rlq reqs waited on completion of creation
+	RelinquishRequestsWaitingCompleteCreation uint64
+	// Relinqs rtr
+	RelinquishRetries uint64
+	// Number of attribute changed requests seen
+	AttributeChangedRequestsSeen uint64
+	// Number of attr changed requests queued
+	AttributeChangedRequestsQueued uint64
+	// Number of attr changed rejected -ENOBUFS
+	AttributeChangedRejectDueToEnobufs uint64
+	// Number of attr changed failed -ENOMEM
+	AttributeChangedFailedDueToEnomem uint64
+	// Number of attr changed ops given CPU time
+	AttributeChangedOps uint64
+	// Number of allocation requests seen
+	AllocationRequestsSeen uint64
+	// Number of successful alloc reqs
+	AllocationOkRequests uint64
+	// Number of alloc reqs that waited on lookup completion
+	AllocationWaitingOnLookup uint64
+	// Number of alloc reqs rejected -ENOBUFS
+	AllocationsRejectedDueToEnobufs uint64
+	// Number of alloc reqs aborted -ERESTARTSYS
+	AllocationsAbortedDueToErestartsys uint64
+	// Number of alloc reqs submitted
+	AllocationOperationsSubmitted uint64
+	// Number of alloc reqs waited for CPU time
+	AllocationsWaitedForCPU uint64
+	// Number of alloc reqs aborted due to object death
+	AllocationsAbortedDueToObjectDeath uint64
+	// Number of retrieval (read) requests seen
+	RetrievalsReadRequests uint64
+	// Number of successful retr reqs
+	RetrievalsOk uint64
+	// Number of retr reqs that waited on lookup completion
+	RetrievalsWaitingLookupCompletion uint64
+	// Number of retr reqs returned -ENODATA
+	RetrievalsReturnedEnodata uint64
+	// Number of retr reqs rejected -ENOBUFS
+	RetrievalsRejectedDueToEnobufs uint64
+	// Number of retr reqs aborted -ERESTARTSYS
+	RetrievalsAbortedDueToErestartsys uint64
+	// Number of retr reqs failed -ENOMEM
+	RetrievalsFailedDueToEnomem uint64
+	// Number of retr reqs submitted
+	RetrievalsRequests uint64
+	// Number of retr reqs waited for CPU time
+	RetrievalsWaitingCPU uint64
+	// Number of retr reqs aborted due to object death
+	RetrievalsAbortedDueToObjectDeath uint64
+	// Number of storage (write) requests seen
+	StoreWriteRequests uint64
+	// Number of successful store reqs
+	StoreSuccessfulRequests uint64
+	// Number of store reqs on a page already pending storage
+	StoreRequestsOnPendingStorage uint64
+	// Number of store reqs rejected -ENOBUFS
+	StoreRequestsRejectedDueToEnobufs uint64
+	// Number of store reqs failed -ENOMEM
+	StoreRequestsFailedDueToEnomem uint64
+	// Number of store reqs submitted
+	StoreRequestsSubmitted uint64
+	// Number of store reqs granted CPU time
+	StoreRequestsRunning uint64
+	// Number of pages given store req processing time
+	StorePagesWithRequestsProcessing uint64
+	// Number of store reqs deleted from tracking tree
+	StoreRequestsDeleted uint64
+	// Number of store reqs over store limit
+	StoreRequestsOverStoreLimit uint64
+	// Number of release reqs against pages with no pending store
+	ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
+	// Number of release reqs against pages stored by time lock granted
+	ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
+	// Number of release reqs ignored due to in-progress store
+	ReleaseRequestsIgnoredDueToInProgressStore uint64
+	// Number of page stores canceled due to release req
+	PageStoresCancelledByReleaseRequests uint64
+	VmscanWaiting                        uint64
+	// Number of times async ops added to pending queues
+	OpsPending uint64
+	// Number of times async ops given CPU time
+	OpsRunning uint64
+	// Number of times async ops queued for processing
+	OpsEnqueued uint64
+	// Number of async ops canceled
+	OpsCancelled uint64
+	// Number of async ops rejected due to object lookup/create failure
+	OpsRejected uint64
+	// Number of async ops initialized
+	OpsInitialised uint64
+	// Number of async ops queued for deferred release
+	OpsDeferred uint64
+	// Number of async ops released (should equal ini=N when idle)
+	OpsReleased uint64
+	// Number of deferred-release async ops garbage collected
+	OpsGarbageCollected uint64
+	// Number of in-progress alloc_object() cache ops
+	CacheopAllocationsinProgress uint64
+	// Number of in-progress lookup_object() cache ops
+	CacheopLookupObjectInProgress uint64
+	// Number of in-progress lookup_complete() cache ops
+	CacheopLookupCompleteInPorgress uint64
+	// Number of in-progress grab_object() cache ops
+	CacheopGrabObjectInProgress uint64
+	CacheopInvalidations        uint64
+	// Number of in-progress update_object() cache ops
+	CacheopUpdateObjectInProgress uint64
+	// Number of in-progress drop_object() cache ops
+	CacheopDropObjectInProgress uint64
+	// Number of in-progress put_object() cache ops
+	CacheopPutObjectInProgress uint64
+	// Number of in-progress attr_changed() cache ops
+	CacheopAttributeChangeInProgress uint64
+	// Number of in-progress sync_cache() cache ops
+	CacheopSyncCacheInProgress uint64
+	// Number of in-progress read_or_alloc_page() cache ops
+	CacheopReadOrAllocPageInProgress uint64
+	// Number of in-progress read_or_alloc_pages() cache ops
+	CacheopReadOrAllocPagesInProgress uint64
+	// Number of in-progress allocate_page() cache ops
+	CacheopAllocatePageInProgress uint64
+	// Number of in-progress allocate_pages() cache ops
+	CacheopAllocatePagesInProgress uint64
+	// Number of in-progress write_page() cache ops
+	CacheopWritePagesInProgress uint64
+	// Number of in-progress uncache_page() cache ops
+	CacheopUncachePagesInProgress uint64
+	// Number of in-progress dissociate_pages() cache ops
+	CacheopDissociatePagesInProgress uint64
+	// Number of object lookups/creations rejected due to lack of space
+	CacheevLookupsAndCreationsRejectedLackSpace uint64
+	// Number of stale objects deleted
+	CacheevStaleObjectsDeleted uint64
+	// Number of objects retired when relinquished
+	CacheevRetiredWhenReliquished uint64
+	// Number of objects culled
+	CacheevObjectsCulled uint64
+}
+
+// Fscacheinfo returns information about current fscache statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
+func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
+	if err != nil {
+		return Fscacheinfo{}, err
+	}
+
+	m, err := parseFscacheinfo(bytes.NewReader(b))
+	if err != nil {
+		return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
+	}
+
+	return *m, nil
+}
+
+func setFSCacheFields(fields []string, setFields ...*uint64) error {
+	var err error
+	if len(fields) < len(setFields) {
+		return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
+	}
+
+	for i := range setFields {
+		*setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
+	var m Fscacheinfo
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		if len(fields) < 2 {
+			return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
+		}
+
+		switch fields[0] {
+		case "Cookies:":
+			err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
+				&m.SpecialCookiesAllocated)
+			if err != nil {
+				return &m, err
+			}
+		case "Objects:":
+			err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
+				&m.ObjectsAvailable, &m.ObjectsDead)
+			if err != nil {
+				return &m, err
+			}
+		case "ChkAux":
+			err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
+				&m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
+			if err != nil {
+				return &m, err
+			}
+		case "Pages":
+			err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
+			if err != nil {
+				return &m, err
+			}
+		case "Acquire:":
+			err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
+				&m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
+				&m.AcquireRequestsFailedDueToEnomem)
+			if err != nil {
+				return &m, err
+			}
+		case "Lookups:":
+			err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
+				&m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
+			if err != nil {
+				return &m, err
+			}
+		case "Invals":
+			err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
+			if err != nil {
+				return &m, err
+			}
+		case "Updates:":
+			err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
+				&m.UpdateRequestsRunning)
+			if err != nil {
+				return &m, err
+			}
+		case "Relinqs:":
+			err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
+				&m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
+			if err != nil {
+				return &m, err
+			}
+		case "AttrChg:":
+			err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
+				&m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
+			if err != nil {
+				return &m, err
+			}
+		case "Allocs":
+			if strings.Split(fields[2], "=")[0] == "n" {
+				err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
+					&m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
+					&m.AllocationsAbortedDueToObjectDeath)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "Retrvls:":
+			if strings.Split(fields[1], "=")[0] == "n" {
+				err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
+					&m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
+					&m.RetrievalsFailedDueToEnomem)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "Stores":
+			if strings.Split(fields[2], "=")[0] == "n" {
+				err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
+					&m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
+					&m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "VmScan":
+			err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
+				&m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
+				&m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
+			if err != nil {
+				return &m, err
+			}
+		case "Ops":
+			if strings.Split(fields[2], "=")[0] == "pend" {
+				err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "CacheOp:":
+			if strings.Split(fields[1], "=")[0] == "alo" {
+				err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
+					&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
+				if err != nil {
+					return &m, err
+				}
+			} else if strings.Split(fields[1], "=")[0] == "inv" {
+				err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
+					&m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
+					&m.CacheopSyncCacheInProgress)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
+					&m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
+					&m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "CacheEv:":
+			err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
+				&m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
+			if err != nil {
+				return &m, err
+			}
+		}
+	}
+
+	return &m, nil
+}
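A short sketch of reading these statistics through the public API; it assumes a kernel with fscache enabled, since /proc/fs/fscache/stats is absent otherwise.

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		fs, err := procfs.NewDefaultFS()
		if err != nil {
			log.Fatalf("could not open proc: %s", err)
		}

		info, err := fs.Fscacheinfo()
		if err != nil {
			log.Fatalf("could not read fscache stats: %s", err)
		}
		fmt.Printf("index cookies allocated: %d\n", info.IndexCookiesAllocated)
		fmt.Printf("positive lookups:        %d\n", info.LookupsPositive)
	}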
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
new file mode 100644
index 0000000..3a43e83
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+const (
+	// DefaultProcMountPoint is the common mount point of the proc filesystem.
+	DefaultProcMountPoint = "/proc"
+
+	// DefaultSysMountPoint is the common mount point of the sys filesystem.
+	DefaultSysMountPoint = "/sys"
+
+	// DefaultConfigfsMountPoint is the common mount point of the configfs.
+	DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+	// DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+	DefaultSelinuxMountPoint = "/sys/fs/selinux"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path appends the given path elements to the filesystem path, adding separators
+// as necessary.
+func (fs FS) Path(p ...string) string {
+	return filepath.Join(append([]string{string(fs)}, p...)...)
+}
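Because this package sits under internal/, it is only importable from inside the procfs module. The sketch below, with a hypothetical procIPVSPath helper, shows how the rest of the module combines NewFS and Path.

	package procfs

	import "github.com/prometheus/procfs/internal/fs"

	// procIPVSPath is a hypothetical helper: it opens the default /proc mount
	// point and joins it with relative elements, yielding "/proc/net/ip_vs".
	func procIPVSPath() (string, error) {
		proc, err := fs.NewFS(fs.DefaultProcMountPoint)
		if err != nil {
			return "", err
		}
		return proc.Path("net", "ip_vs"), nil
	}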
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 0000000..5a7d2df
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"errors"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+	us := make([]uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, u)
+	}
+
+	return us, nil
+}
+
+// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
+func ParsePInt64s(ss []string) ([]*int64, error) {
+	us := make([]*int64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseInt(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, &u)
+	}
+
+	return us, nil
+}
+
+// ParseHexUint64s parses a slice of hexadecimal strings into a slice of uint64 pointers.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+	us := make([]*uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 16, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, &u)
+	}
+
+	return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ReadIntFromFile reads a file and attempts to parse an int64 from it.
+func ReadIntFromFile(path string) (int64, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ParseBool parses a string into a boolean pointer.
+func ParseBool(b string) *bool {
+	var truth bool
+	switch b {
+	case "enabled":
+		truth = true
+	case "disabled":
+		truth = false
+	default:
+		return nil
+	}
+	return &truth
+}
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	hexString := strings.TrimSpace(string(data))
+	if !strings.HasPrefix(hexString, "0x") {
+		return 0, errors.New("invalid format: hex string does not start with '0x'")
+	}
+	return strconv.ParseUint(hexString[2:], 16, 64)
+}
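A small sketch of two of the helpers above, again assuming code inside the procfs module (internal/ packages cannot be imported from elsewhere); the helper name is illustrative.

	package procfs

	import "github.com/prometheus/procfs/internal/util"

	// randomThreshold is a hypothetical helper: it reads an unsigned integer
	// from a sysctl file and maps an "enabled"/"disabled" string onto a *bool
	// (nil for any other value).
	func randomThreshold(state string) (uint64, *bool, error) {
		v, err := util.ReadUintFromFile("/proc/sys/kernel/random/write_wakeup_threshold")
		if err != nil {
			return 0, nil, err
		}
		return v, util.ParseBool(state), nil
	}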
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
new file mode 100644
index 0000000..71b7a70
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"io"
+	"os"
+)
+
+// ReadFileNoStat uses io.ReadAll to read contents of entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
+// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
+// Reads a max file size of 1024kB.  For files larger than this, a scanner
+// should be used.
+func ReadFileNoStat(filename string) ([]byte, error) {
+	const maxBufferSize = 1024 * 1024
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	reader := io.LimitReader(f, maxBufferSize)
+	return io.ReadAll(reader)
+}
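A one-line sketch of the helper above from inside the module: ReadFileNoStat is what the exported parsers use when the size reported by stat(2) on a procfs file cannot be trusted. The readStat name is illustrative.

	package procfs

	import "github.com/prometheus/procfs/internal/util"

	// readStat is a hypothetical helper: it reads /proc/stat in full without
	// consulting the (often wrong) size that Stat reports for procfs files.
	func readStat() ([]byte, error) {
		return util.ReadFileNoStat("/proc/stat")
	}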
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
new file mode 100644
index 0000000..d5404a6
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine
+
+package util
+
+import (
+	"bytes"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+)
+
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+//
+// Note that this function will not read files larger than 128 bytes.
+func SysReadFile(file string) (string, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	// On some machines, hwmon drivers are broken and return EAGAIN.  This causes
+	// Go's os.ReadFile implementation to poll forever.
+	//
+	// Since we either want to read data or bail immediately, do the simplest
+	// possible read using syscall directly.
+	const sysFileBufferSize = 128
+	b := make([]byte, sysFileBufferSize)
+	n, err := syscall.Read(int(f.Fd()), b)
+	if err != nil {
+		return "", err
+	}
+
+	return string(bytes.TrimSpace(b[:n])), nil
+}
+
+// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
+func SysReadUintFromFile(path string) (uint64, error) {
+	data, err := SysReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// SysReadIntFromFile reads a file using SysReadFile and attempts to parse an int64 from it.
+func SysReadIntFromFile(path string) (int64, error) {
+	data, err := SysReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
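A sketch of the sysfs read path, assuming a Linux or Darwin build (the compat file that follows covers other platforms); the hwmon path is only an example.

	package procfs

	import "github.com/prometheus/procfs/internal/util"

	// hwmonTemp is a hypothetical helper: SysReadFile performs a single read(2),
	// so a broken hwmon file that keeps returning EAGAIN fails immediately
	// instead of making the reader poll forever.
	func hwmonTemp() (int64, error) {
		return util.SysReadIntFromFile("/sys/class/hwmon/hwmon0/temp1_input")
	}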
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
new file mode 100644
index 0000000..1d86f5e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -0,0 +1,27 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin
+
+package util
+
+import (
+	"fmt"
+)
+
+// SysReadFile is here implemented as a noop for builds that do not support
+// the read syscall. For example Windows, or Linux on Google App Engine.
+func SysReadFile(file string) (string, error) {
+	return "", fmt.Errorf("not supported on this platform")
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
new file mode 100644
index 0000000..fe2355d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"strconv"
+)
+
+// TODO(mdlayher): util packages are an anti-pattern and this should be moved
+// somewhere else that is more focused in the future.
+
+// A ValueParser enables parsing a single string into a variety of data types
+// in a concise and safe way. The Err method must be invoked after invoking
+// any other methods to ensure a value was successfully parsed.
+type ValueParser struct {
+	v   string
+	err error
+}
+
+// NewValueParser creates a ValueParser using the input string.
+func NewValueParser(v string) *ValueParser {
+	return &ValueParser{v: v}
+}
+
+// Int interprets the underlying value as an int and returns that value.
+func (vp *ValueParser) Int() int { return int(vp.int64()) }
+
+// PInt64 interprets the underlying value as an int64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PInt64() *int64 {
+	if vp.err != nil {
+		return nil
+	}
+
+	v := vp.int64()
+	return &v
+}
+
+// int64 interprets the underlying value as an int64 and returns that value.
+// TODO: export if/when necessary.
+func (vp *ValueParser) int64() int64 {
+	if vp.err != nil {
+		return 0
+	}
+
+	// A base value of zero makes ParseInt infer the correct base using the
+	// string's prefix, if any.
+	const base = 0
+	v, err := strconv.ParseInt(vp.v, base, 64)
+	if err != nil {
+		vp.err = err
+		return 0
+	}
+
+	return v
+}
+
+// PUInt64 interprets the underlying value as a uint64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PUInt64() *uint64 {
+	if vp.err != nil {
+		return nil
+	}
+
+	// A base value of zero makes ParseUint infer the correct base using the
+	// string's prefix, if any.
+	const base = 0
+	v, err := strconv.ParseUint(vp.v, base, 64)
+	if err != nil {
+		vp.err = err
+		return nil
+	}
+
+	return &v
+}
+
+// Err returns the last error, if any, encountered by the ValueParser.
+func (vp *ValueParser) Err() error {
+	return vp.err
+}
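A minimal sketch of the intended call pattern, mirroring parseKV earlier in this diff: call the typed accessors first, then check Err. The parsePair helper and its arguments are illustrative.

	package procfs

	import "github.com/prometheus/procfs/internal/util"

	// parsePair is a hypothetical helper showing the ValueParser contract:
	// accessors return zero values or nil once an error occurs, and Err
	// reports the first failure.
	func parsePair(sizeStr, prioStr string) (*uint64, *int64, error) {
		vp := util.NewValueParser(sizeStr)
		size := vp.PUInt64()
		if err := vp.Err(); err != nil {
			return nil, nil, err
		}

		vp = util.NewValueParser(prioStr)
		prio := vp.PInt64()
		return size, prio, vp.Err()
	}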
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 0000000..bc3a20c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,241 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+	IncomingBytes uint64
+	// Total outgoing traffic.
+	OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+	// The local (virtual) IP address.
+	LocalAddress net.IP
+	// The remote (real) IP address.
+	RemoteAddress net.IP
+	// The local (virtual) port.
+	LocalPort uint16
+	// The remote (real) port.
+	RemotePort uint16
+	// The local firewall mark
+	LocalMark string
+	// The transport protocol (TCP, UDP).
+	Proto string
+	// The current number of active connections for this virtual/real address pair.
+	ActiveConn uint64
+	// The current number of inactive connections for this virtual/real address pair.
+	InactConn uint64
+	// The current weight of this virtual/real address pair.
+	Weight uint64
+}
+
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return parseIPVSStats(bytes.NewReader(data))
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(r io.Reader) (IPVSStats, error) {
+	var (
+		statContent []byte
+		statLines   []string
+		statFields  []string
+		stats       IPVSStats
+	)
+
+	statContent, err := io.ReadAll(r)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	statLines = strings.SplitN(string(statContent), "\n", 4)
+	if len(statLines) != 4 {
+		return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+	}
+
+	statFields = strings.Fields(statLines[2])
+	if len(statFields) != 5 {
+		return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+	}
+
+	stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return stats, nil
+}
+
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
+	file, err := os.Open(fs.proc.Path("net/ip_vs"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+	var (
+		status       []IPVSBackendStatus
+		scanner      = bufio.NewScanner(file)
+		proto        string
+		localMark    string
+		localAddress net.IP
+		localPort    uint16
+		err          error
+	)
+
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		if len(fields) == 0 {
+			continue
+		}
+		switch {
+		case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+			continue
+		case fields[0] == "TCP" || fields[0] == "UDP":
+			if len(fields) < 2 {
+				continue
+			}
+			proto = fields[0]
+			localMark = ""
+			localAddress, localPort, err = parseIPPort(fields[1])
+			if err != nil {
+				return nil, err
+			}
+		case fields[0] == "FWM":
+			if len(fields) < 2 {
+				continue
+			}
+			proto = fields[0]
+			localMark = fields[1]
+			localAddress = nil
+			localPort = 0
+		case fields[0] == "->":
+			if len(fields) < 6 {
+				continue
+			}
+			remoteAddress, remotePort, err := parseIPPort(fields[1])
+			if err != nil {
+				return nil, err
+			}
+			weight, err := strconv.ParseUint(fields[3], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			status = append(status, IPVSBackendStatus{
+				LocalAddress:  localAddress,
+				LocalPort:     localPort,
+				LocalMark:     localMark,
+				RemoteAddress: remoteAddress,
+				RemotePort:    remotePort,
+				Proto:         proto,
+				Weight:        weight,
+				ActiveConn:    activeConn,
+				InactConn:     inactConn,
+			})
+		}
+	}
+	return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+	var (
+		ip  net.IP
+		err error
+	)
+
+	switch len(s) {
+	case 13:
+		ip, err = hex.DecodeString(s[0:8])
+		if err != nil {
+			return nil, 0, err
+		}
+	case 46:
+		ip = net.ParseIP(s[1:40])
+		if ip == nil {
+			return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
+		}
+	default:
+		return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
+	}
+
+	portString := s[len(s)-4:]
+	if len(portString) != 4 {
+		return nil, 0,
+			fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
+	}
+	port, err := strconv.ParseUint(portString, 16, 16)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return ip, uint16(port), nil
+}
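A usage sketch for the two accessors above; it assumes IPVS is configured, otherwise /proc/net/ip_vs and /proc/net/ip_vs_stats do not exist and the calls return errors.

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		fs, err := procfs.NewDefaultFS()
		if err != nil {
			log.Fatalf("could not open proc: %s", err)
		}

		stats, err := fs.IPVSStats()
		if err != nil {
			log.Fatalf("could not read ip_vs_stats: %s", err)
		}
		fmt.Printf("connections: %d\n", stats.Connections)

		backends, err := fs.IPVSBackendStatus()
		if err != nil {
			log.Fatalf("could not read ip_vs: %s", err)
		}
		for _, b := range backends {
			fmt.Printf("%s %s:%d -> %s:%d weight=%d\n",
				b.Proto, b.LocalAddress, b.LocalPort, b.RemoteAddress, b.RemotePort, b.Weight)
		}
	}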
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
new file mode 100644
index 0000000..db88566
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -0,0 +1,63 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"os"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// KernelRandom contains information about the kernel's random number generator.
+type KernelRandom struct {
+	// EntropyAvaliable gives the available entropy, in bits.
+	EntropyAvaliable *uint64
+	// PoolSize gives the size of the entropy pool, in bits.
+	PoolSize *uint64
+	// URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
+	URandomMinReseedSeconds *uint64
+	// WriteWakeupThreshold the number of bits of entropy below which we wake up processes
+	// that do a select(2) or poll(2) for write access to /dev/random.
+	WriteWakeupThreshold *uint64
+	// ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
+	// waiting for entropy from /dev/random.
+	ReadWakeupThreshold *uint64
+}
+
+// KernelRandom returns values from /proc/sys/kernel/random.
+func (fs FS) KernelRandom() (KernelRandom, error) {
+	random := KernelRandom{}
+
+	for file, p := range map[string]**uint64{
+		"entropy_avail":           &random.EntropyAvaliable,
+		"poolsize":                &random.PoolSize,
+		"urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
+		"write_wakeup_threshold":  &random.WriteWakeupThreshold,
+		"read_wakeup_threshold":   &random.ReadWakeupThreshold,
+	} {
+		val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
+		if os.IsNotExist(err) {
+			continue
+		}
+		if err != nil {
+			return random, err
+		}
+		*p = &val
+	}
+
+	return random, nil
+}
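A short sketch of the accessor above; the pointer fields stay nil when a file under /proc/sys/kernel/random is missing, so callers should nil-check before dereferencing.

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		fs, err := procfs.NewDefaultFS()
		if err != nil {
			log.Fatalf("could not open proc: %s", err)
		}

		random, err := fs.KernelRandom()
		if err != nil {
			log.Fatalf("could not read kernel random settings: %s", err)
		}
		// Fields are pointers and remain nil when the corresponding file is absent.
		if random.EntropyAvaliable != nil {
			fmt.Printf("entropy available: %d bits\n", *random.EntropyAvaliable)
		}
	}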
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
new file mode 100644
index 0000000..332e76c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// LoadAvg represents an entry in /proc/loadavg.
+type LoadAvg struct {
+	Load1  float64
+	Load5  float64
+	Load15 float64
+}
+
+// LoadAvg returns loadavg from /proc.
+func (fs FS) LoadAvg() (*LoadAvg, error) {
+	path := fs.proc.Path("loadavg")
+
+	data, err := util.ReadFileNoStat(path)
+	if err != nil {
+		return nil, err
+	}
+	return parseLoad(data)
+}
+
+// parseLoad parses the contents of /proc/loadavg and returns the 1m, 5m and 15m load averages.
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
+	loads := make([]float64, 3)
+	parts := strings.Fields(string(loadavgBytes))
+	if len(parts) < 3 {
+		return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
+	}
+
+	var err error
+	for i, load := range parts[0:3] {
+		loads[i], err = strconv.ParseFloat(load, 64)
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
+		}
+	}
+	return &LoadAvg{
+		Load1:  loads[0],
+		Load5:  loads[1],
+		Load15: loads[2],
+	}, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 0000000..67a9d2b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	statusLineRE         = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
+	recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
+	recoveryLinePctRE    = regexp.MustCompile(`= (.+)%`)
+	recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
+	recoveryLineSpeedRE  = regexp.MustCompile(`speed=(.+)[A-Z]`)
+	componentDeviceRE    = regexp.MustCompile(`(.*)\[\d+\]`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+	// Name of the device.
+	Name string
+	// activity-state of the device.
+	ActivityState string
+	// Number of active disks.
+	DisksActive int64
+	// Total number of disks the device requires.
+	DisksTotal int64
+	// Number of failed disks.
+	DisksFailed int64
+	// Number of "down" disks. (the _ indicator in the status line)
+	DisksDown int64
+	// Spare disks in the device.
+	DisksSpare int64
+	// Number of blocks the device holds.
+	BlocksTotal int64
+	// Number of blocks on the device that are in sync.
+	BlocksSynced int64
+	// Number of blocks on the device that need to be synced.
+	BlocksToBeSynced int64
+	// progress percentage of current sync
+	BlocksSyncedPct float64
+	// estimated finishing time for current sync (in minutes)
+	BlocksSyncedFinishTime float64
+	// current sync speed (in Kilobytes/sec)
+	BlocksSyncedSpeed float64
+	// Name of md component devices
+	Devices []string
+}
+
+// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.  More information available here:
+// https://raid.wiki.kernel.org/index.php/Mdstat
+func (fs FS) MDStat() ([]MDStat, error) {
+	data, err := os.ReadFile(fs.proc.Path("mdstat"))
+	if err != nil {
+		return nil, err
+	}
+	mdstat, err := parseMDStat(data)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
+	}
+	return mdstat, nil
+}
+
+// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.
+func parseMDStat(mdStatData []byte) ([]MDStat, error) {
+	mdStats := []MDStat{}
+	lines := strings.Split(string(mdStatData), "\n")
+
+	for i, line := range lines {
+		if strings.TrimSpace(line) == "" || line[0] == ' ' ||
+			strings.HasPrefix(line, "Personalities") ||
+			strings.HasPrefix(line, "unused") {
+			continue
+		}
+
+		deviceFields := strings.Fields(line)
+		if len(deviceFields) < 3 {
+			return nil, fmt.Errorf("%w: Expected 3+ fields, got %q", ErrFileParse, line)
+		}
+		mdName := deviceFields[0] // mdx
+		state := deviceFields[2]  // active or inactive
+
+		if len(lines) <= i+3 {
+			return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
+		}
+
+		// Failed disks have the suffix (F) & Spare disks have the suffix (S).
+		fail := int64(strings.Count(line, "(F)"))
+		spare := int64(strings.Count(line, "(S)"))
+		active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
+
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
+		}
+
+		syncLineIdx := i + 2
+		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+			syncLineIdx++
+		}
+
+		// If device is syncing at the moment, get the number of currently
+		// synced bytes, otherwise that number equals the size of the device.
+		blocksSynced := size
+		blocksToBeSynced := size
+		speed := float64(0)
+		finish := float64(0)
+		pct := float64(0)
+		recovering := strings.Contains(lines[syncLineIdx], "recovery")
+		resyncing := strings.Contains(lines[syncLineIdx], "resync")
+		checking := strings.Contains(lines[syncLineIdx], "check")
+
+		// Append recovery and resyncing state info.
+		if recovering || resyncing || checking {
+			if recovering {
+				state = "recovering"
+			} else if checking {
+				state = "checking"
+			} else {
+				state = "resyncing"
+			}
+
+			// Handle case when resync=PENDING or resync=DELAYED.
+			if strings.Contains(lines[syncLineIdx], "PENDING") ||
+				strings.Contains(lines[syncLineIdx], "DELAYED") {
+				blocksSynced = 0
+			} else {
+				blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
+				if err != nil {
+					return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
+				}
+			}
+		}
+
+		mdStats = append(mdStats, MDStat{
+			Name:                   mdName,
+			ActivityState:          state,
+			DisksActive:            active,
+			DisksFailed:            fail,
+			DisksDown:              down,
+			DisksSpare:             spare,
+			DisksTotal:             total,
+			BlocksTotal:            size,
+			BlocksSynced:           blocksSynced,
+			BlocksToBeSynced:       blocksToBeSynced,
+			BlocksSyncedPct:        pct,
+			BlocksSyncedFinishTime: finish,
+			BlocksSyncedSpeed:      speed,
+			Devices:                evalComponentDevices(deviceFields),
+		})
+	}
+
+	return mdStats, nil
+}
+
+func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+	statusFields := strings.Fields(statusLine)
+	if len(statusFields) < 1 {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+	}
+
+	sizeStr := statusFields[0]
+	size, err = strconv.ParseInt(sizeStr, 10, 64)
+	if err != nil {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+	}
+
+	if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
+		// In the device deviceLine, only disks have a number associated with them in [].
+		total = int64(strings.Count(deviceLine, "["))
+		return total, total, 0, size, nil
+	}
+
+	if strings.Contains(deviceLine, "inactive") {
+		return 0, 0, 0, size, nil
+	}
+
+	matches := statusLineRE.FindStringSubmatch(statusLine)
+	if len(matches) != 5 {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Could not find all substring matches %s: %w", ErrFileParse, statusLine, err)
+	}
+
+	total, err = strconv.ParseInt(matches[2], 10, 64)
+	if err != nil {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+	}
+
+	active, err = strconv.ParseInt(matches[3], 10, 64)
+	if err != nil {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
+	}
+	down = int64(strings.Count(matches[4], "_"))
+
+	return active, total, down, size, nil
+}
+
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
+	matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
+	}
+
+	blocks := strings.Split(matches[1], "/")
+	blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
+	if err != nil {
+		return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
+	}
+
+	blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+	if err != nil {
+		return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
+	}
+
+	// Get percentage complete
+	matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
+	}
+	pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
+	}
+
+	// Get time expected left to complete
+	matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
+	}
+	finish, err = strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
+	}
+
+	// Get recovery speed
+	matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
+	}
+	speed, err = strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
+	}
+
+	return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
+}
+
+func evalComponentDevices(deviceFields []string) []string {
+	mdComponentDevices := make([]string, 0)
+	if len(deviceFields) > 3 {
+		for _, field := range deviceFields[4:] {
+			match := componentDeviceRE.FindStringSubmatch(field)
+			if match == nil {
+				continue
+			}
+			mdComponentDevices = append(mdComponentDevices, match[1])
+		}
+	}
+
+	return mdComponentDevices
+}
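A usage sketch for the parser above; on a host without md arrays the returned slice is simply empty.

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		fs, err := procfs.NewDefaultFS()
		if err != nil {
			log.Fatalf("could not open proc: %s", err)
		}

		mdstats, err := fs.MDStat()
		if err != nil {
			log.Fatalf("could not read mdstat: %s", err)
		}
		for _, md := range mdstats {
			fmt.Printf("%s: %s, %d/%d disks, %.1f%% synced, devices=%v\n",
				md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
				md.BlocksSyncedPct, md.Devices)
		}
	}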
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
new file mode 100644
index 0000000..4b2c405
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -0,0 +1,389 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Meminfo represents memory statistics.
+type Meminfo struct {
+	// Total usable ram (i.e. physical ram minus a few reserved
+	// bits and the kernel binary code)
+	MemTotal *uint64
+	// The sum of LowFree+HighFree
+	MemFree *uint64
+	// An estimate of how much memory is available for starting
+	// new applications, without swapping. Calculated from
+	// MemFree, SReclaimable, the size of the file LRU lists, and
+	// the low watermarks in each zone.  The estimate takes into
+	// account that the system needs some page cache to function
+	// well, and that not all reclaimable slab will be
+	// reclaimable, due to items being in use. The impact of those
+	// factors will vary from system to system.
+	MemAvailable *uint64
+	// Relatively temporary storage for raw disk blocks shouldn't
+	// get tremendously large (20MB or so)
+	Buffers *uint64
+	Cached  *uint64
+	// Memory that once was swapped out, is swapped back in but
+	// still also is in the swapfile (if memory is needed it
+	// doesn't need to be swapped out AGAIN because it is already
+	// in the swapfile. This saves I/O)
+	SwapCached *uint64
+	// Memory that has been used more recently and usually not
+	// reclaimed unless absolutely necessary.
+	Active *uint64
+	// Memory which has been less recently used.  It is more
+	// eligible to be reclaimed for other purposes
+	Inactive     *uint64
+	ActiveAnon   *uint64
+	InactiveAnon *uint64
+	ActiveFile   *uint64
+	InactiveFile *uint64
+	Unevictable  *uint64
+	Mlocked      *uint64
+	// total amount of swap space available
+	SwapTotal *uint64
+	// Memory which has been evicted from RAM, and is temporarily
+	// on the disk
+	SwapFree *uint64
+	// Memory which is waiting to get written back to the disk
+	Dirty *uint64
+	// Memory which is actively being written back to the disk
+	Writeback *uint64
+	// Non-file backed pages mapped into userspace page tables
+	AnonPages *uint64
+	// files which have been mapped, such as libraries
+	Mapped *uint64
+	Shmem  *uint64
+	// in-kernel data structures cache
+	Slab *uint64
+	// Part of Slab, that might be reclaimed, such as caches
+	SReclaimable *uint64
+	// Part of Slab, that cannot be reclaimed on memory pressure
+	SUnreclaim  *uint64
+	KernelStack *uint64
+	// amount of memory dedicated to the lowest level of page
+	// tables.
+	PageTables *uint64
+	// NFS pages sent to the server, but not yet committed to
+	// stable storage
+	NFSUnstable *uint64
+	// Memory used for block device "bounce buffers"
+	Bounce *uint64
+	// Memory used by FUSE for temporary writeback buffers
+	WritebackTmp *uint64
+	// Based on the overcommit ratio ('vm.overcommit_ratio'),
+	// this is the total amount of  memory currently available to
+	// be allocated on the system. This limit is only adhered to
+	// if strict overcommit accounting is enabled (mode 2 in
+	// 'vm.overcommit_memory').
+	// The CommitLimit is calculated with the following formula:
+	// CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
+	//                overcommit_ratio / 100 + [total swap pages]
+	// For example, on a system with 1G of physical RAM and 7G
+	// of swap with a `vm.overcommit_ratio` of 30 it would
+	// yield a CommitLimit of 7.3G.
+	// For more details, see the memory overcommit documentation
+	// in vm/overcommit-accounting.
+	CommitLimit *uint64
+	// The amount of memory presently allocated on the system.
+	// The committed memory is a sum of all of the memory which
+	// has been allocated by processes, even if it has not been
+	// "used" by them as of yet. A process which malloc()'s 1G
+	// of memory, but only touches 300M of it will show up as
+	// using 1G. This 1G is memory which has been "committed" to
+	// by the VM and can be used at any time by the allocating
+	// application. With strict overcommit enabled on the system
+	// (mode 2 in 'vm.overcommit_memory'), allocations which would
+	// exceed the CommitLimit (detailed above) will not be permitted.
+	// This is useful if one needs to guarantee that processes will
+	// not fail due to lack of memory once that memory has been
+	// successfully allocated.
+	CommittedAS *uint64
+	// total size of vmalloc memory area
+	VmallocTotal *uint64
+	// amount of vmalloc area which is used
+	VmallocUsed *uint64
+	// largest contiguous block of vmalloc area which is free
+	VmallocChunk      *uint64
+	Percpu            *uint64
+	HardwareCorrupted *uint64
+	AnonHugePages     *uint64
+	ShmemHugePages    *uint64
+	ShmemPmdMapped    *uint64
+	CmaTotal          *uint64
+	CmaFree           *uint64
+	HugePagesTotal    *uint64
+	HugePagesFree     *uint64
+	HugePagesRsvd     *uint64
+	HugePagesSurp     *uint64
+	Hugepagesize      *uint64
+	DirectMap4k       *uint64
+	DirectMap2M       *uint64
+	DirectMap1G       *uint64
+
+	// The struct fields below are the byte-normalized counterparts to the
+	// existing struct fields. Values are normalized using the optional
+	// unit field in the meminfo line.
+	MemTotalBytes          *uint64
+	MemFreeBytes           *uint64
+	MemAvailableBytes      *uint64
+	BuffersBytes           *uint64
+	CachedBytes            *uint64
+	SwapCachedBytes        *uint64
+	ActiveBytes            *uint64
+	InactiveBytes          *uint64
+	ActiveAnonBytes        *uint64
+	InactiveAnonBytes      *uint64
+	ActiveFileBytes        *uint64
+	InactiveFileBytes      *uint64
+	UnevictableBytes       *uint64
+	MlockedBytes           *uint64
+	SwapTotalBytes         *uint64
+	SwapFreeBytes          *uint64
+	DirtyBytes             *uint64
+	WritebackBytes         *uint64
+	AnonPagesBytes         *uint64
+	MappedBytes            *uint64
+	ShmemBytes             *uint64
+	SlabBytes              *uint64
+	SReclaimableBytes      *uint64
+	SUnreclaimBytes        *uint64
+	KernelStackBytes       *uint64
+	PageTablesBytes        *uint64
+	NFSUnstableBytes       *uint64
+	BounceBytes            *uint64
+	WritebackTmpBytes      *uint64
+	CommitLimitBytes       *uint64
+	CommittedASBytes       *uint64
+	VmallocTotalBytes      *uint64
+	VmallocUsedBytes       *uint64
+	VmallocChunkBytes      *uint64
+	PercpuBytes            *uint64
+	HardwareCorruptedBytes *uint64
+	AnonHugePagesBytes     *uint64
+	ShmemHugePagesBytes    *uint64
+	ShmemPmdMappedBytes    *uint64
+	CmaTotalBytes          *uint64
+	CmaFreeBytes           *uint64
+	HugepagesizeBytes      *uint64
+	DirectMap4kBytes       *uint64
+	DirectMap2MBytes       *uint64
+	DirectMap1GBytes       *uint64
+}
+
+// Meminfo returns information about current kernel/system memory statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Meminfo() (Meminfo, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
+	if err != nil {
+		return Meminfo{}, err
+	}
+
+	m, err := parseMemInfo(bytes.NewReader(b))
+	if err != nil {
+		return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
+	}
+
+	return *m, nil
+}
+
+func parseMemInfo(r io.Reader) (*Meminfo, error) {
+	var m Meminfo
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		var val, valBytes uint64
+
+		val, err := strconv.ParseUint(fields[1], 0, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		switch len(fields) {
+		case 2:
+			// No unit present, use the parsed value as bytes directly.
+			valBytes = val
+		case 3:
+			// Unit present in optional 3rd field, convert it to
+			// bytes. The only unit supported within the Linux
+			// kernel is `kB`.
+			if fields[2] != "kB" {
+				return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+			}
+
+			valBytes = 1024 * val
+
+		default:
+			return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+		}
+
+		switch fields[0] {
+		case "MemTotal:":
+			m.MemTotal = &val
+			m.MemTotalBytes = &valBytes
+		case "MemFree:":
+			m.MemFree = &val
+			m.MemFreeBytes = &valBytes
+		case "MemAvailable:":
+			m.MemAvailable = &val
+			m.MemAvailableBytes = &valBytes
+		case "Buffers:":
+			m.Buffers = &val
+			m.BuffersBytes = &valBytes
+		case "Cached:":
+			m.Cached = &val
+			m.CachedBytes = &valBytes
+		case "SwapCached:":
+			m.SwapCached = &val
+			m.SwapCachedBytes = &valBytes
+		case "Active:":
+			m.Active = &val
+			m.ActiveBytes = &valBytes
+		case "Inactive:":
+			m.Inactive = &val
+			m.InactiveBytes = &valBytes
+		case "Active(anon):":
+			m.ActiveAnon = &val
+			m.ActiveAnonBytes = &valBytes
+		case "Inactive(anon):":
+			m.InactiveAnon = &val
+			m.InactiveAnonBytes = &valBytes
+		case "Active(file):":
+			m.ActiveFile = &val
+			m.ActiveFileBytes = &valBytes
+		case "Inactive(file):":
+			m.InactiveFile = &val
+			m.InactiveFileBytes = &valBytes
+		case "Unevictable:":
+			m.Unevictable = &val
+			m.UnevictableBytes = &valBytes
+		case "Mlocked:":
+			m.Mlocked = &val
+			m.MlockedBytes = &valBytes
+		case "SwapTotal:":
+			m.SwapTotal = &val
+			m.SwapTotalBytes = &valBytes
+		case "SwapFree:":
+			m.SwapFree = &val
+			m.SwapFreeBytes = &valBytes
+		case "Dirty:":
+			m.Dirty = &val
+			m.DirtyBytes = &valBytes
+		case "Writeback:":
+			m.Writeback = &val
+			m.WritebackBytes = &valBytes
+		case "AnonPages:":
+			m.AnonPages = &val
+			m.AnonPagesBytes = &valBytes
+		case "Mapped:":
+			m.Mapped = &val
+			m.MappedBytes = &valBytes
+		case "Shmem:":
+			m.Shmem = &val
+			m.ShmemBytes = &valBytes
+		case "Slab:":
+			m.Slab = &val
+			m.SlabBytes = &valBytes
+		case "SReclaimable:":
+			m.SReclaimable = &val
+			m.SReclaimableBytes = &valBytes
+		case "SUnreclaim:":
+			m.SUnreclaim = &val
+			m.SUnreclaimBytes = &valBytes
+		case "KernelStack:":
+			m.KernelStack = &val
+			m.KernelStackBytes = &valBytes
+		case "PageTables:":
+			m.PageTables = &val
+			m.PageTablesBytes = &valBytes
+		case "NFS_Unstable:":
+			m.NFSUnstable = &val
+			m.NFSUnstableBytes = &valBytes
+		case "Bounce:":
+			m.Bounce = &val
+			m.BounceBytes = &valBytes
+		case "WritebackTmp:":
+			m.WritebackTmp = &val
+			m.WritebackTmpBytes = &valBytes
+		case "CommitLimit:":
+			m.CommitLimit = &val
+			m.CommitLimitBytes = &valBytes
+		case "Committed_AS:":
+			m.CommittedAS = &val
+			m.CommittedASBytes = &valBytes
+		case "VmallocTotal:":
+			m.VmallocTotal = &val
+			m.VmallocTotalBytes = &valBytes
+		case "VmallocUsed:":
+			m.VmallocUsed = &val
+			m.VmallocUsedBytes = &valBytes
+		case "VmallocChunk:":
+			m.VmallocChunk = &val
+			m.VmallocChunkBytes = &valBytes
+		case "Percpu:":
+			m.Percpu = &val
+			m.PercpuBytes = &valBytes
+		case "HardwareCorrupted:":
+			m.HardwareCorrupted = &val
+			m.HardwareCorruptedBytes = &valBytes
+		case "AnonHugePages:":
+			m.AnonHugePages = &val
+			m.AnonHugePagesBytes = &valBytes
+		case "ShmemHugePages:":
+			m.ShmemHugePages = &val
+			m.ShmemHugePagesBytes = &valBytes
+		case "ShmemPmdMapped:":
+			m.ShmemPmdMapped = &val
+			m.ShmemPmdMappedBytes = &valBytes
+		case "CmaTotal:":
+			m.CmaTotal = &val
+			m.CmaTotalBytes = &valBytes
+		case "CmaFree:":
+			m.CmaFree = &val
+			m.CmaFreeBytes = &valBytes
+		case "HugePages_Total:":
+			m.HugePagesTotal = &val
+		case "HugePages_Free:":
+			m.HugePagesFree = &val
+		case "HugePages_Rsvd:":
+			m.HugePagesRsvd = &val
+		case "HugePages_Surp:":
+			m.HugePagesSurp = &val
+		case "Hugepagesize:":
+			m.Hugepagesize = &val
+			m.HugepagesizeBytes = &valBytes
+		case "DirectMap4k:":
+			m.DirectMap4k = &val
+			m.DirectMap4kBytes = &valBytes
+		case "DirectMap2M:":
+			m.DirectMap2M = &val
+			m.DirectMap2MBytes = &valBytes
+		case "DirectMap1G:":
+			m.DirectMap1G = &val
+			m.DirectMap1GBytes = &valBytes
+		}
+	}
+
+	return &m, nil
+}
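To illustrate how the kB normalization above surfaces to callers, here is a small usage sketch (not part of the vendored file). It assumes the package's NewDefaultFS constructor; the pointer fields let callers distinguish a missing meminfo line from a zero value:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	// Raw fields keep the value as printed in /proc/meminfo (usually kB);
	// the *Bytes counterparts are normalized using the optional unit field.
	if mi.MemAvailable != nil && mi.MemAvailableBytes != nil {
		fmt.Printf("MemAvailable: %d kB (%d bytes)\n", *mi.MemAvailable, *mi.MemAvailableBytes)
	}
}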
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
new file mode 100644
index 0000000..a704c5e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -0,0 +1,180 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A MountInfo is a type that describes the details and options
+// for each mount, parsed from /proc/self/mountinfo.
+// The fields of each entry in /proc/self/mountinfo
+// are described in the following man page.
+// http://man7.org/linux/man-pages/man5/proc.5.html
+type MountInfo struct {
+	// Unique ID for the mount
+	MountID int
+	// The ID of the parent mount
+	ParentID int
+	// The value of `st_dev` for the files on this FS
+	MajorMinorVer string
+	// The pathname of the directory in the FS that forms
+	// the root for this mount
+	Root string
+	// The pathname of the mount point relative to the root
+	MountPoint string
+	// Mount options
+	Options map[string]string
+	// Zero or more optional fields
+	OptionalFields map[string]string
+	// The Filesystem type
+	FSType string
+	// FS specific information or "none"
+	Source string
+	// Superblock options
+	SuperOptions map[string]string
+}
+
+// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
+func parseMountInfo(info []byte) ([]*MountInfo, error) {
+	mounts := []*MountInfo{}
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	for scanner.Scan() {
+		mountString := scanner.Text()
+		parsedMounts, err := parseMountInfoString(mountString)
+		if err != nil {
+			return nil, err
+		}
+		mounts = append(mounts, parsedMounts)
+	}
+
+	err := scanner.Err()
+	return mounts, err
+}
+
+// Parses a mountinfo file line and converts it to a MountInfo struct.
+// An important check here is for the hyphen separator: if it does not exist,
+// the line is malformed.
+func parseMountInfoString(mountString string) (*MountInfo, error) {
+	var err error
+
+	mountInfo := strings.Split(mountString, " ")
+	mountInfoLength := len(mountInfo)
+	if mountInfoLength < 10 {
+		return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
+	}
+
+	if mountInfo[mountInfoLength-4] != "-" {
+		return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
+	}
+
+	mount := &MountInfo{
+		MajorMinorVer:  mountInfo[2],
+		Root:           mountInfo[3],
+		MountPoint:     mountInfo[4],
+		Options:        mountOptionsParser(mountInfo[5]),
+		OptionalFields: nil,
+		FSType:         mountInfo[mountInfoLength-3],
+		Source:         mountInfo[mountInfoLength-2],
+		SuperOptions:   mountOptionsParser(mountInfo[mountInfoLength-1]),
+	}
+
+	mount.MountID, err = strconv.Atoi(mountInfo[0])
+	if err != nil {
+		return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
+	}
+	mount.ParentID, err = strconv.Atoi(mountInfo[1])
+	if err != nil {
+		return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
+	}
+	// Has optional fields, which are a space-separated list of values.
+	// Example: shared:2 master:7
+	if mountInfo[6] != "" {
+		mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
+		if err != nil {
+			return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
+		}
+	}
+	return mount, nil
+}
+
+// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
+func mountOptionsIsValidField(s string) bool {
+	switch s {
+	case
+		"shared",
+		"master",
+		"propagate_from",
+		"unbindable":
+		return true
+	}
+	return false
+}
+
+// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings.
+func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
+	optionalFields := make(map[string]string)
+	for _, field := range o {
+		optionSplit := strings.SplitN(field, ":", 2)
+		value := ""
+		if len(optionSplit) == 2 {
+			value = optionSplit[1]
+		}
+		if mountOptionsIsValidField(optionSplit[0]) {
+			optionalFields[optionSplit[0]] = value
+		}
+	}
+	return optionalFields, nil
+}
+
+// mountOptionsParser parses the mount options, superblock options.
+func mountOptionsParser(mountOptions string) map[string]string {
+	opts := make(map[string]string)
+	options := strings.Split(mountOptions, ",")
+	for _, opt := range options {
+		splitOption := strings.Split(opt, "=")
+		if len(splitOption) < 2 {
+			key := splitOption[0]
+			opts[key] = ""
+		} else {
+			key, value := splitOption[0], splitOption[1]
+			opts[key] = value
+		}
+	}
+	return opts
+}
+
+// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
+func GetMounts() ([]*MountInfo, error) {
+	data, err := util.ReadFileNoStat("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	return parseMountInfo(data)
+}
+
+// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
+func GetProcMounts(pid int) ([]*MountInfo, error) {
+	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	return parseMountInfo(data)
+}
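A short usage sketch for the mount parsing above (illustrative only, not part of the vendored file); GetMounts reads /proc/self/mountinfo and returns one MountInfo per line:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Options and SuperOptions are parsed into maps; value-less options
		// such as "ro" map to the empty string.
		fmt.Printf("id=%d %s on %s type %s from %s\n",
			m.MountID, m.Root, m.MountPoint, m.FSType, m.Source)
	}
}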
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 0000000..50caa73
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,710 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+//   https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Constants shared between multiple functions.
+const (
+	deviceEntryLen = 8
+
+	fieldBytesLen  = 8
+	fieldEventsLen = 27
+
+	statVersion10 = "1.0"
+	statVersion11 = "1.1"
+
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
+
+	// Kernel version >= 4.14 MaxLen
+	// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
+	fieldTransport11RDMAMaxLen = 28
+
+	// Kernel version <= 4.2 MinLen
+	// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
+	fieldTransport11RDMAMinLen = 20
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+	// Name of the device.
+	Device string
+	// The mount point of the device.
+	Mount string
+	// The filesystem type used by the device.
+	Type string
+	// If available, additional statistics related to this Mount.
+	// Use a type assertion to determine if additional statistics are available.
+	Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+	mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+	// The version of statistics provided.
+	StatVersion string
+	// The mount options of the NFS mount.
+	Opts map[string]string
+	// The age of the NFS mount.
+	Age time.Duration
+	// Statistics related to byte counters for various operations.
+	Bytes NFSBytesStats
+	// Statistics related to various NFS event occurrences.
+	Events NFSEventsStats
+	// Statistics broken down by filesystem operation.
+	Operations []NFSOperationStats
+	// Statistics about the NFS RPC transport.
+	Transport []NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+	// Number of bytes read using the read() syscall.
+	Read uint64
+	// Number of bytes written using the write() syscall.
+	Write uint64
+	// Number of bytes read using the read() syscall in O_DIRECT mode.
+	DirectRead uint64
+	// Number of bytes written using the write() syscall in O_DIRECT mode.
+	DirectWrite uint64
+	// Number of bytes read from the NFS server, in total.
+	ReadTotal uint64
+	// Number of bytes written to the NFS server, in total.
+	WriteTotal uint64
+	// Number of pages read directly via mmap()'d files.
+	ReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+	// Number of times cached inode attributes are re-validated from the server.
+	InodeRevalidate uint64
+	// Number of times cached dentry nodes are re-validated from the server.
+	DnodeRevalidate uint64
+	// Number of times an inode cache is cleared.
+	DataInvalidate uint64
+	// Number of times cached inode attributes are invalidated.
+	AttributeInvalidate uint64
+	// Number of times files or directories have been open()'d.
+	VFSOpen uint64
+	// Number of times a directory lookup has occurred.
+	VFSLookup uint64
+	// Number of times permissions have been checked.
+	VFSAccess uint64
+	// Number of updates (and potential writes) to pages.
+	VFSUpdatePage uint64
+	// Number of pages read directly via mmap()'d files.
+	VFSReadPage uint64
+	// Number of times a group of pages have been read.
+	VFSReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	VFSWritePage uint64
+	// Number of times a group of pages have been written.
+	VFSWritePages uint64
+	// Number of times directory entries have been read with getdents().
+	VFSGetdents uint64
+	// Number of times attributes have been set on inodes.
+	VFSSetattr uint64
+	// Number of pending writes that have been forcefully flushed to the server.
+	VFSFlush uint64
+	// Number of times fsync() has been called on directories and files.
+	VFSFsync uint64
+	// Number of times locking has been attempted on a file.
+	VFSLock uint64
+	// Number of times files have been closed and released.
+	VFSFileRelease uint64
+	// Unknown.  Possibly unused.
+	CongestionWait uint64
+	// Number of times files have been truncated.
+	Truncation uint64
+	// Number of times a file has been grown due to writes beyond its existing end.
+	WriteExtension uint64
+	// Number of times a file was removed while still open by another process.
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueMilliseconds uint64
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseMilliseconds uint64
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestMilliseconds uint64
+	// The count of operations that complete with tk_status < 0.  These statuses usually indicate error conditions.
+	Errors uint64
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTimeSeconds uint64
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+
+	// Stats below only available with stat version 1.1.
+	// Transport over RDMA
+
+	// accessed when sending a call
+	ReadChunkCount   uint64
+	WriteChunkCount  uint64
+	ReplyChunkCount  uint64
+	TotalRdmaRequest uint64
+
+	// rarely accessed error counters
+	PullupCopyCount      uint64
+	HardwayRegisterCount uint64
+	FailedMarshalCount   uint64
+	BadReplyCount        uint64
+	MrsRecovered         uint64
+	MrsOrphaned          uint64
+	MrsAllocated         uint64
+	EmptySendctxQ        uint64
+
+	// accessed when receiving a reply
+	TotalRdmaReply    uint64
+	FixupCopyCount    uint64
+	ReplyWaitsForSend uint64
+	LocalInvNeeded    uint64
+	NomsgCallCount    uint64
+	BcallCount        uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+	const (
+		device            = "device"
+		statVersionPrefix = "statvers="
+
+		nfs3Type = "nfs"
+		nfs4Type = "nfs4"
+	)
+
+	var mounts []*Mount
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Only look for device entries in this function
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 || ss[0] != device {
+			continue
+		}
+
+		m, err := parseMount(ss)
+		if err != nil {
+			return nil, err
+		}
+
+		// Does this mount also possess statistics information?
+		if len(ss) > deviceEntryLen {
+			// Only NFSv3 and v4 are supported for parsing statistics
+			if m.Type != nfs3Type && m.Type != nfs4Type {
+				return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
+			}
+
+			statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+			stats, err := parseMountStatsNFS(s, statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			m.Stats = stats
+		}
+
+		mounts = append(mounts, m)
+	}
+
+	return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+//
+//	device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+	if len(ss) < deviceEntryLen {
+		return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
+	}
+
+	// Check for specific words appearing at specific indices to ensure
+	// the format is consistent with what we expect
+	format := []struct {
+		i int
+		s string
+	}{
+		{i: 0, s: "device"},
+		{i: 2, s: "mounted"},
+		{i: 3, s: "on"},
+		{i: 5, s: "with"},
+		{i: 6, s: "fstype"},
+	}
+
+	for _, f := range format {
+		if ss[f.i] != f.s {
+			return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
+		}
+	}
+
+	return &Mount{
+		Device: ss[1],
+		Mount:  ss[4],
+		Type:   ss[7],
+	}, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+	// Field indicators for parsing specific types of data
+	const (
+		fieldOpts       = "opts:"
+		fieldAge        = "age:"
+		fieldBytes      = "bytes:"
+		fieldEvents     = "events:"
+		fieldPerOpStats = "per-op"
+		fieldTransport  = "xprt:"
+	)
+
+	stats := &MountStatsNFS{
+		StatVersion: statVersion,
+	}
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			break
+		}
+
+		switch ss[0] {
+		case fieldOpts:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+			}
+			if stats.Opts == nil {
+				stats.Opts = map[string]string{}
+			}
+			for _, opt := range strings.Split(ss[1], ",") {
+				split := strings.Split(opt, "=")
+				if len(split) == 2 {
+					stats.Opts[split[0]] = split[1]
+				} else {
+					stats.Opts[opt] = ""
+				}
+			}
+		case fieldAge:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+			}
+			// Age integer is in seconds
+			d, err := time.ParseDuration(ss[1] + "s")
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Age = d
+		case fieldBytes:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+			}
+			bstats, err := parseNFSBytesStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Bytes = *bstats
+		case fieldEvents:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
+			}
+			estats, err := parseNFSEventsStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Events = *estats
+		case fieldTransport:
+			if len(ss) < 3 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
+			}
+
+			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Transport = append(stats.Transport, *tstats)
+		}
+
+		// When encountering "per-operation statistics", we must break this
+		// loop and parse them separately to ensure we can terminate parsing
+		// before reaching another device entry; hence why this 'if' statement
+		// is not just another switch case
+		if ss[0] == fieldPerOpStats {
+			break
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	// NFS per-operation stats appear last before the next device entry
+	perOpStats, err := parseNFSOperationStats(s)
+	if err != nil {
+		return nil, err
+	}
+
+	stats.Operations = perOpStats
+
+	return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+	if len(ss) != fieldBytesLen {
+		return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
+	}
+
+	ns := make([]uint64, 0, fieldBytesLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSBytesStats{
+		Read:        ns[0],
+		Write:       ns[1],
+		DirectRead:  ns[2],
+		DirectWrite: ns[3],
+		ReadTotal:   ns[4],
+		WriteTotal:  ns[5],
+		ReadPages:   ns[6],
+		WritePages:  ns[7],
+	}, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+	if len(ss) != fieldEventsLen {
+		return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
+	}
+
+	ns := make([]uint64, 0, fieldEventsLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSEventsStats{
+		InodeRevalidate:     ns[0],
+		DnodeRevalidate:     ns[1],
+		DataInvalidate:      ns[2],
+		AttributeInvalidate: ns[3],
+		VFSOpen:             ns[4],
+		VFSLookup:           ns[5],
+		VFSAccess:           ns[6],
+		VFSUpdatePage:       ns[7],
+		VFSReadPage:         ns[8],
+		VFSReadPages:        ns[9],
+		VFSWritePage:        ns[10],
+		VFSWritePages:       ns[11],
+		VFSGetdents:         ns[12],
+		VFSSetattr:          ns[13],
+		VFSFlush:            ns[14],
+		VFSFsync:            ns[15],
+		VFSLock:             ns[16],
+		VFSFileRelease:      ns[17],
+		CongestionWait:      ns[18],
+		Truncation:          ns[19],
+		WriteExtension:      ns[20],
+		SillyRename:         ns[21],
+		ShortRead:           ns[22],
+		ShortWrite:          ns[23],
+		JukeboxDelay:        ns[24],
+		PNFSRead:            ns[25],
+		PNFSWrite:           ns[26],
+	}, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+	const (
+		// Minimum number of expected fields in each per-operation statistics set
+		minFields = 9
+	)
+
+	var ops []NFSOperationStats
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			// Must break when reading a blank line after per-operation stats to
+			// enable top-level function to parse the next device entry
+			break
+		}
+
+		if len(ss) < minFields {
+			return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
+		}
+
+		// Skip string operation name for integers
+		ns := make([]uint64, 0, minFields-1)
+		for _, st := range ss[1:] {
+			n, err := strconv.ParseUint(st, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+
+			ns = append(ns, n)
+		}
+		opStats := NFSOperationStats{
+			Operation:                           strings.TrimSuffix(ss[0], ":"),
+			Requests:                            ns[0],
+			Transmissions:                       ns[1],
+			MajorTimeouts:                       ns[2],
+			BytesSent:                           ns[3],
+			BytesReceived:                       ns[4],
+			CumulativeQueueMilliseconds:         ns[5],
+			CumulativeTotalResponseMilliseconds: ns[6],
+			CumulativeTotalRequestMilliseconds:  ns[7],
+		}
+
+		if len(ns) > 8 {
+			opStats.Errors = ns[8]
+		}
+
+		ops = append(ops, opStats)
+	}
+
+	return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	// Extract the protocol field. It is the only string value in the line
+	protocol := ss[0]
+	ss = ss[1:]
+
+	switch statVersion {
+	case statVersion10:
+		var expectedLength int
+		switch protocol {
+		case "tcp":
+			expectedLength = fieldTransport10TCPLen
+		case "udp":
+			expectedLength = fieldTransport10UDPLen
+		default:
+			return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
+		}
+		if len(ss) != expectedLength {
+			return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
+		}
+	case statVersion11:
+		var expectedLength int
+		switch protocol {
+		case "tcp":
+			expectedLength = fieldTransport11TCPLen
+		case "udp":
+			expectedLength = fieldTransport11UDPLen
+		case "rdma":
+			expectedLength = fieldTransport11RDMAMinLen
+		default:
+			return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
+		}
+		if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
+			(protocol == "rdma" && len(ss) < expectedLength) {
+			return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
+		}
+	default:
+		return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
+	}
+
+	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+	// the TCP length here.
+	//
+	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
+	// only v1.0 stats are present.
+	// See: https://github.com/prometheus/node_exporter/issues/571.
+	//
+	// Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
+	ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
+	for i, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns[i] = n
+	}
+
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	switch protocol {
+	case "udp":
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	case "tcp":
+		ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
+	case "rdma":
+		ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
+	}
+
+	return &NFSTransportStats{
+		// NFS xprt over tcp or udp
+		Protocol:                 protocol,
+		Port:                     ns[0],
+		Bind:                     ns[1],
+		Connect:                  ns[2],
+		ConnectIdleTime:          ns[3],
+		IdleTimeSeconds:          ns[4],
+		Sends:                    ns[5],
+		Receives:                 ns[6],
+		BadTransactionIDs:        ns[7],
+		CumulativeActiveRequests: ns[8],
+		CumulativeBacklog:        ns[9],
+
+		// NFS xprt over tcp or udp
+		// And statVersion 1.1
+		MaximumRPCSlotsUsed:    ns[10],
+		CumulativeSendingQueue: ns[11],
+		CumulativePendingQueue: ns[12],
+
+		// NFS xprt over rdma
+		// And stat Version 1.1
+		ReadChunkCount:       ns[13],
+		WriteChunkCount:      ns[14],
+		ReplyChunkCount:      ns[15],
+		TotalRdmaRequest:     ns[16],
+		PullupCopyCount:      ns[17],
+		HardwayRegisterCount: ns[18],
+		FailedMarshalCount:   ns[19],
+		BadReplyCount:        ns[20],
+		MrsRecovered:         ns[21],
+		MrsOrphaned:          ns[22],
+		MrsAllocated:         ns[23],
+		EmptySendctxQ:        ns[24],
+		TotalRdmaReply:       ns[25],
+		FixupCopyCount:       ns[26],
+		ReplyWaitsForSend:    ns[27],
+		LocalInvNeeded:       ns[28],
+		NomsgCallCount:       ns[29],
+		BcallCount:           ns[30],
+	}, nil
+}
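As an illustration of how the NFS statistics above are consumed, here is a minimal sketch (not part of the vendored file) assuming the package's Self() and Proc.MountStats() accessors, which are defined elsewhere in procfs:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Stats is an interface; NFS mounts carry *MountStatsNFS.
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Printf("%s (%s): age=%s read=%d B written=%d B\n",
				m.Device, m.Type, nfs.Age, nfs.Bytes.ReadTotal, nfs.Bytes.WriteTotal)
		}
	}
}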
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
new file mode 100644
index 0000000..316df5f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -0,0 +1,118 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
+// and contains netfilter conntrack statistics at one CPU core.
+type ConntrackStatEntry struct {
+	Entries       uint64
+	Searched      uint64
+	Found         uint64
+	New           uint64
+	Invalid       uint64
+	Ignore        uint64
+	Delete        uint64
+	DeleteList    uint64
+	Insert        uint64
+	InsertFailed  uint64
+	Drop          uint64
+	EarlyDrop     uint64
+	SearchRestart uint64
+}
+
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
+func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
+	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
+}
+
+// Parses a slice of ConntrackStatEntries from the given filepath.
+func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
+	// This file is small and can be read with one syscall.
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		// Do not wrap this error so the caller can detect os.IsNotExist and
+		// similar conditions.
+		return nil, err
+	}
+
+	stat, err := parseConntrackStat(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
+	}
+
+	return stat, nil
+}
+
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
+func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
+	var entries []ConntrackStatEntry
+
+	scanner := bufio.NewScanner(r)
+	scanner.Scan()
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		conntrackEntry, err := parseConntrackStatEntry(fields)
+		if err != nil {
+			return nil, err
+		}
+		entries = append(entries, *conntrackEntry)
+	}
+
+	return entries, nil
+}
+
+// Parses a ConntrackStatEntry from a given array of fields.
+func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
+	entries, err := util.ParseHexUint64s(fields)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
+	}
+	numEntries := len(entries)
+	if numEntries < 16 || numEntries > 17 {
+		return nil,
+			fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
+	}
+
+	stats := &ConntrackStatEntry{
+		Entries:      *entries[0],
+		Searched:     *entries[1],
+		Found:        *entries[2],
+		New:          *entries[3],
+		Invalid:      *entries[4],
+		Ignore:       *entries[5],
+		Delete:       *entries[6],
+		DeleteList:   *entries[7],
+		Insert:       *entries[8],
+		InsertFailed: *entries[9],
+		Drop:         *entries[10],
+		EarlyDrop:    *entries[11],
+	}
+
+	// Ignore missing search_restart on Linux < 2.6.35.
+	if numEntries == 17 {
+		stats.SearchRestart = *entries[16]
+	}
+
+	return stats, nil
+}
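A usage sketch for the conntrack statistics above (illustrative, not part of the vendored file). Each ConntrackStatEntry corresponds to one CPU line of net/stat/nf_conntrack:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.ConntrackStat()
	if err != nil {
		// A missing nf_conntrack file (module not loaded) is returned unwrapped,
		// so callers can still check for a not-exist condition.
		log.Fatal(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu=%d found=%d invalid=%d drop=%d\n", cpu, e.Found, e.Invalid, e.Drop)
	}
}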
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 0000000..e66208a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,205 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+	Name         string `json:"name"`          // The name of the interface.
+	RxBytes      uint64 `json:"rx_bytes"`      // Cumulative count of bytes received.
+	RxPackets    uint64 `json:"rx_packets"`    // Cumulative count of packets received.
+	RxErrors     uint64 `json:"rx_errors"`     // Cumulative count of receive errors encountered.
+	RxDropped    uint64 `json:"rx_dropped"`    // Cumulative count of packets dropped while receiving.
+	RxFIFO       uint64 `json:"rx_fifo"`       // Cumulative count of FIFO buffer errors.
+	RxFrame      uint64 `json:"rx_frame"`      // Cumulative count of packet framing errors.
+	RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+	RxMulticast  uint64 `json:"rx_multicast"`  // Cumulative count of multicast frames received by the device driver.
+	TxBytes      uint64 `json:"tx_bytes"`      // Cumulative count of bytes transmitted.
+	TxPackets    uint64 `json:"tx_packets"`    // Cumulative count of packets transmitted.
+	TxErrors     uint64 `json:"tx_errors"`     // Cumulative count of transmit errors encountered.
+	TxDropped    uint64 `json:"tx_dropped"`    // Cumulative count of packets dropped while transmitting.
+	TxFIFO       uint64 `json:"tx_fifo"`       // Cumulative count of FIFO buffer errors.
+	TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+	TxCarrier    uint64 `json:"tx_carrier"`    // Cumulative count of carrier losses detected by the device driver.
+	TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NetDev() (NetDev, error) {
+	return newNetDev(fs.proc.Path("net/dev"))
+}
+
+// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NetDev() (NetDev, error) {
+	return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return NetDev{}, err
+	}
+	defer f.Close()
+
+	netDev := NetDev{}
+	s := bufio.NewScanner(f)
+	for n := 0; s.Scan(); n++ {
+		// Skip the 2 header lines.
+		if n < 2 {
+			continue
+		}
+
+		line, err := netDev.parseLine(s.Text())
+		if err != nil {
+			return netDev, err
+		}
+
+		netDev[line.Name] = *line
+	}
+
+	return netDev, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+	idx := strings.LastIndex(rawLine, ":")
+	if idx == -1 {
+		return nil, errors.New("invalid net/dev line, missing colon")
+	}
+	fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
+
+	var err error
+	line := &NetDevLine{}
+
+	// Interface Name
+	line.Name = strings.TrimSpace(rawLine[:idx])
+	if line.Name == "" {
+		return nil, errors.New("invalid net/dev line, empty interface name")
+	}
+
+	// RX
+	line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// TX
+	line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (netDev NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(netDev))
+	for _, ifc := range netDev {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
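To show how the per-interface counters above aggregate, a small sketch (not part of the vendored file) using FS.NetDev and the Total helper:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	nd, err := fs.NetDev() // map keyed by interface name
	if err != nil {
		log.Fatal(err)
	}
	total := nd.Total()
	fmt.Printf("interfaces=[%s] rx=%d B tx=%d B\n", total.Name, total.RxBytes, total.TxBytes)
}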
diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 0000000..f50b38e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+	netDevSNMP6 := make(NetDevSNMP6)
+
+	// The net/dev_snmp6 folders contain one file per interface
+	ifaceFiles, err := os.ReadDir(dir)
+	if err != nil {
+		// On systems with IPv6 disabled, this directory won't exist;
+		// return the error to the caller in either case.
+		if errors.Is(err, os.ErrNotExist) {
+			return netDevSNMP6, err
+		}
+		return netDevSNMP6, err
+	}
+
+	for _, iFaceFile := range ifaceFiles {
+		f, err := os.Open(dir + "/" + iFaceFile.Name())
+		if err != nil {
+			return netDevSNMP6, err
+		}
+		defer f.Close()
+
+		netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+		if err != nil {
+			return netDevSNMP6, err
+		}
+	}
+
+	return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+	m := make(map[string]uint64)
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		stat := strings.Fields(scanner.Text())
+		if len(stat) < 2 {
+			continue
+		}
+		key, val := stat[0], stat[1]
+
+		// Expect stat name to contain "6" or be "ifIndex"
+		if strings.Contains(key, "6") || key == "ifIndex" {
+			v, err := strconv.ParseUint(val, 10, 64)
+			if err != nil {
+				return m, err
+			}
+
+			m[key] = v
+		}
+	}
+	return m, scanner.Err()
+}
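A usage sketch for the per-interface IPv6 statistics above (illustrative, not part of the vendored file); the key "Ip6InReceives" is only an example of a stat name that may appear under /proc/net/dev_snmp6:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		log.Fatal(err) // the directory may be absent when IPv6 is disabled
	}
	for iface, counters := range stats {
		fmt.Printf("%s: Ip6InReceives=%d\n", iface, counters["Ip6InReceives"])
	}
}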
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
new file mode 100644
index 0000000..19e3378
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -0,0 +1,248 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+const (
+	// Maximum size limit used by io.LimitReader while reading the content of the
+	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
+	// as each line represents a single used socket.
+	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
+	// With e.g. 150 Byte per line and the maximum number of 65535,
+	// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
+	readLimit = 4294967296 // Byte -> 4 GiB
+)
+
+// This contains generic data structures for both udp and tcp sockets.
+type (
+	// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
+	NetIPSocket []*netIPSocketLine
+
+	// NetIPSocketSummary provides already computed values like the total queue lengths or
+	// the total number of used sockets. In contrast to NetIPSocket it does not collect
+	// the parsed lines into a slice.
+	NetIPSocketSummary struct {
+		// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
+		TxQueueLength uint64
+		// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
+		RxQueueLength uint64
+		// UsedSockets shows the total number of parsed lines representing the
+		// number of used sockets.
+		UsedSockets uint64
+		// Drops shows the total number of dropped packets of all UDP sockets.
+		Drops *uint64
+	}
+
+	// A single line parser for fields from /proc/net/{t,u}dp{,6}.
+	// Fields which are not used by IPSocket are skipped.
+	// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
+	// For the proc file format details, see https://linux.die.net/man/5/proc.
+	netIPSocketLine struct {
+		Sl        uint64
+		LocalAddr net.IP
+		LocalPort uint64
+		RemAddr   net.IP
+		RemPort   uint64
+		St        uint64
+		TxQueue   uint64
+		RxQueue   uint64
+		UID       uint64
+		Inode     uint64
+		Drops     *uint64
+	}
+)
+
+func newNetIPSocket(file string) (NetIPSocket, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var netIPSocket NetIPSocket
+	isUDP := strings.Contains(file, "udp")
+
+	lr := io.LimitReader(f, readLimit)
+	s := bufio.NewScanner(lr)
+	s.Scan() // skip first line with headers
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		line, err := parseNetIPSocketLine(fields, isUDP)
+		if err != nil {
+			return nil, err
+		}
+		netIPSocket = append(netIPSocket, line)
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return netIPSocket, nil
+}
+
+// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file.
+func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var netIPSocketSummary NetIPSocketSummary
+	var udpPacketDrops uint64
+	isUDP := strings.Contains(file, "udp")
+
+	lr := io.LimitReader(f, readLimit)
+	s := bufio.NewScanner(lr)
+	s.Scan() // skip first line with headers
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		line, err := parseNetIPSocketLine(fields, isUDP)
+		if err != nil {
+			return nil, err
+		}
+		netIPSocketSummary.TxQueueLength += line.TxQueue
+		netIPSocketSummary.RxQueueLength += line.RxQueue
+		netIPSocketSummary.UsedSockets++
+		if isUDP {
+			udpPacketDrops += *line.Drops
+			netIPSocketSummary.Drops = &udpPacketDrops
+		}
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return &netIPSocketSummary, nil
+}
+
+// The /proc/net/{t,u}dp{,6} files are network byte order for IPv4. For IPv6 the
+// address is four words consisting of four bytes each. In each of those four
+// words the four bytes are written in reverse order.
+
+func parseIP(hexIP string) (net.IP, error) {
+	var byteIP []byte
+	byteIP, err := hex.DecodeString(hexIP)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
+	}
+	switch len(byteIP) {
+	case 4:
+		return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
+	case 16:
+		i := net.IP{
+			byteIP[3], byteIP[2], byteIP[1], byteIP[0],
+			byteIP[7], byteIP[6], byteIP[5], byteIP[4],
+			byteIP[11], byteIP[10], byteIP[9], byteIP[8],
+			byteIP[15], byteIP[14], byteIP[13], byteIP[12],
+		}
+		return i, nil
+	default:
+		return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
+	}
+}
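+
+// An illustrative decoding sketch (comment only, not part of the upstream
+// package): an IPv4 entry such as "0100007F:1F90" in /proc/net/tcp would be
+// handled by the helpers above as
+//
+//	ip, _ := parseIP("0100007F")                 // bytes 01 00 00 7F reversed -> 127.0.0.1
+//	port, _ := strconv.ParseUint("1F90", 16, 64) // 0x1F90 -> 8080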
+
+// parseNetIPSocketLine parses a single line, represented by a list of fields.
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
+	line := &netIPSocketLine{}
+	if len(fields) < 10 {
+		return nil, fmt.Errorf(
+			"%w: Less than 10 columns found %q",
+			ErrFileParse,
+			strings.Join(fields, " "),
+		)
+	}
+	var err error // parse error
+
+	// sl
+	s := strings.Split(fields[0], ":")
+	if len(s) != 2 {
+		return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
+	}
+
+	if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
+	}
+	// local_address
+	l := strings.Split(fields[1], ":")
+	if len(l) != 2 {
+		return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
+	}
+	if line.LocalAddr, err = parseIP(l[0]); err != nil {
+		return nil, err
+	}
+	if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
+	}
+
+	// remote_address
+	r := strings.Split(fields[2], ":")
+	if len(r) != 2 {
+		return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
+	}
+	if line.RemAddr, err = parseIP(r[0]); err != nil {
+		return nil, err
+	}
+	if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
+	}
+
+	// st
+	if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
+	}
+
+	// tx_queue and rx_queue
+	q := strings.Split(fields[4], ":")
+	if len(q) != 2 {
+		return nil, fmt.Errorf(
+			"%w: Missing colon for tx/rx queues in socket line %q",
+			ErrFileParse,
+			fields[4],
+		)
+	}
+	if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
+	}
+	if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
+	}
+
+	// uid
+	if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
+	}
+
+	// inode
+	if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+	}
+
+	// drops
+	if isUDP {
+		drops, err := strconv.ParseUint(fields[12], 0, 64)
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
+		}
+		line.Drops = &drops
+	}
+
+	return line, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
new file mode 100644
index 0000000..8d4b1ac
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -0,0 +1,183 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// NetProtocolStats stores the contents from /proc/net/protocols.
+type NetProtocolStats map[string]NetProtocolStatLine
+
+// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
+// only care about the first eight columns; the remaining columns are the
+// capability flags for each protocol and are collected in Capabilities.
+type NetProtocolStatLine struct {
+	Name         string // 0 The name of the protocol
+	Size         uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
+	Sockets      int64  // 2 Number of sockets in use by this protocol
+	Memory       int64  // 3 Number of 4KB pages allocated by all sockets of this protocol
+	Pressure     int    // 4 This is either yes, no, or NI (not implemented), mapped to 1, 0, and -1 respectively.
+	MaxHeader    uint64 // 5 Protocol specific max header size
+	Slab         bool   // 6 Indicates whether or not memory is allocated from the SLAB
+	ModuleName   string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
+	Capabilities NetProtocolCapabilities
+}
+
+// NetProtocolCapabilities contains a list of capabilities for each protocol.
+type NetProtocolCapabilities struct {
+	Close               bool // 8
+	Connect             bool // 9
+	Disconnect          bool // 10
+	Accept              bool // 11
+	IoCtl               bool // 12
+	Init                bool // 13
+	Destroy             bool // 14
+	Shutdown            bool // 15
+	SetSockOpt          bool // 16
+	GetSockOpt          bool // 17
+	SendMsg             bool // 18
+	RecvMsg             bool // 19
+	SendPage            bool // 20
+	Bind                bool // 21
+	BacklogRcv          bool // 22
+	Hash                bool // 23
+	UnHash              bool // 24
+	GetPort             bool // 25
+	EnterMemoryPressure bool // 26
+}
+
+// NetProtocols reads stats from /proc/net/protocols and returns a map of
+// NetProtocolStatLine entries. As of this writing no official Linux documentation
+// exists; however, the source is fairly self-explanatory and the format has been
+// stable since its introduction in 2.6.12-rc2.
+// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
+// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
+func (fs FS) NetProtocols() (NetProtocolStats, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
+	if err != nil {
+		return NetProtocolStats{}, err
+	}
+	return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
+}
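+
+// A minimal consumer-side usage sketch (comment only; assumes a procfs mounted
+// at DefaultMountPoint and that the caller imports fmt and log):
+//
+//	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	stats, err := fs.NetProtocols()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if tcp, ok := stats["TCP"]; ok {
+//		fmt.Printf("TCP sockets in use: %d\n", tcp.Sockets)
+//	}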
+
+func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
+	nps := NetProtocolStats{}
+
+	// Skip the header line
+	s.Scan()
+
+	for s.Scan() {
+		line, err := nps.parseLine(s.Text())
+		if err != nil {
+			return NetProtocolStats{}, err
+		}
+
+		nps[line.Name] = *line
+	}
+	return nps, nil
+}
+
+func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
+	line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
+	var err error
+	const enabled = "yes"
+	const disabled = "no"
+
+	fields := strings.Fields(rawLine)
+	line.Name = fields[0]
+	line.Size, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	switch fields[4] {
+	case enabled:
+		line.Pressure = 1
+	case disabled:
+		line.Pressure = 0
+	default:
+		line.Pressure = -1
+	}
+	line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	switch fields[6] {
+	case enabled:
+		line.Slab = true
+	case disabled:
+		line.Slab = false
+	default:
+		return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
+	}
+	line.ModuleName = fields[7]
+
+	err = line.Capabilities.parseCapabilities(fields[8:])
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
+	// The capabilities are all bools so we can loop over to map them
+	capabilityFields := [...]*bool{
+		&pc.Close,
+		&pc.Connect,
+		&pc.Disconnect,
+		&pc.Accept,
+		&pc.IoCtl,
+		&pc.Init,
+		&pc.Destroy,
+		&pc.Shutdown,
+		&pc.SetSockOpt,
+		&pc.GetSockOpt,
+		&pc.SendMsg,
+		&pc.RecvMsg,
+		&pc.SendPage,
+		&pc.Bind,
+		&pc.BacklogRcv,
+		&pc.Hash,
+		&pc.UnHash,
+		&pc.GetPort,
+		&pc.EnterMemoryPressure,
+	}
+
+	for i := 0; i < len(capabilities); i++ {
+		switch capabilities[i] {
+		case "y":
+			*capabilityFields[i] = true
+		case "n":
+			*capabilityFields[i] = false
+		default:
+			return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
new file mode 100644
index 0000000..deb7029
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_route.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+const (
+	blackholeRepresentation string = "*"
+	blackholeIfaceName      string = "blackhole"
+	routeLineColumns        int    = 11
+)
+
+// A NetRouteLine represents one line from net/route.
+type NetRouteLine struct {
+	Iface       string
+	Destination uint32
+	Gateway     uint32
+	Flags       uint32
+	RefCnt      uint32
+	Use         uint32
+	Metric      uint32
+	Mask        uint32
+	MTU         uint32
+	Window      uint32
+	IRTT        uint32
+}
+
+func (fs FS) NetRoute() ([]NetRouteLine, error) {
+	return readNetRoute(fs.proc.Path("net", "route"))
+}
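+
+// A consumer-side sketch (comment only; fs is assumed to come from
+// procfs.NewFS(procfs.DefaultMountPoint)). Destination and Mask hold the raw
+// values parsed from the hex columns of /proc/net/route:
+//
+//	routes, err := fs.NetRoute()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, r := range routes {
+//		fmt.Printf("%s dst=%08x mask=%08x metric=%d\n", r.Iface, r.Destination, r.Mask, r.Metric)
+//	}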
+
+func readNetRoute(path string) ([]NetRouteLine, error) {
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	routelines, err := parseNetRoute(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
+	}
+	return routelines, nil
+}
+
+func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
+	var routelines []NetRouteLine
+
+	scanner := bufio.NewScanner(r)
+	scanner.Scan()
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		routeline, err := parseNetRouteLine(fields)
+		if err != nil {
+			return nil, err
+		}
+		routelines = append(routelines, *routeline)
+	}
+	return routelines, nil
+}
+
+func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
+	if len(fields) != routeLineColumns {
+		return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
+	}
+	iface := fields[0]
+	if iface == blackholeRepresentation {
+		iface = blackholeIfaceName
+	}
+	destination, err := strconv.ParseUint(fields[1], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	gateway, err := strconv.ParseUint(fields[2], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	flags, err := strconv.ParseUint(fields[3], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	refcnt, err := strconv.ParseUint(fields[4], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	use, err := strconv.ParseUint(fields[5], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	metric, err := strconv.ParseUint(fields[6], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	mask, err := strconv.ParseUint(fields[7], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	mtu, err := strconv.ParseUint(fields[8], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	window, err := strconv.ParseUint(fields[9], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	irtt, err := strconv.ParseUint(fields[10], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	routeline := &NetRouteLine{
+		Iface:       iface,
+		Destination: uint32(destination),
+		Gateway:     uint32(gateway),
+		Flags:       uint32(flags),
+		RefCnt:      uint32(refcnt),
+		Use:         uint32(use),
+		Metric:      uint32(metric),
+		Mask:        uint32(mask),
+		MTU:         uint32(mtu),
+		Window:      uint32(window),
+		IRTT:        uint32(irtt),
+	}
+	return routeline, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
new file mode 100644
index 0000000..fae62b1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -0,0 +1,162 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
+// respectively.
+type NetSockstat struct {
+	// Used is non-nil for IPv4 sockstat results, but nil for IPv6.
+	Used      *int
+	Protocols []NetSockstatProtocol
+}
+
+// A NetSockstatProtocol contains statistics about a given socket protocol.
+// Pointer fields indicate that the value may or may not be present on any
+// given protocol.
+type NetSockstatProtocol struct {
+	Protocol string
+	InUse    int
+	Orphan   *int
+	TW       *int
+	Alloc    *int
+	Mem      *int
+	Memory   *int
+}
+
+// NetSockstat retrieves IPv4 socket statistics.
+func (fs FS) NetSockstat() (*NetSockstat, error) {
+	return readSockstat(fs.proc.Path("net", "sockstat"))
+}
+
+// NetSockstat6 retrieves IPv6 socket statistics.
+//
+// If IPv6 is disabled on this kernel, the returned error can be checked with
+// os.IsNotExist.
+func (fs FS) NetSockstat6() (*NetSockstat, error) {
+	return readSockstat(fs.proc.Path("net", "sockstat6"))
+}
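+
+// A consumer-side sketch (comment only; fs assumed from procfs.NewFS). Used is
+// a pointer because it is present for IPv4 but absent for IPv6:
+//
+//	stat, err := fs.NetSockstat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if stat.Used != nil {
+//		fmt.Printf("sockets used: %d\n", *stat.Used)
+//	}
+//	for _, p := range stat.Protocols {
+//		fmt.Printf("%s inuse=%d\n", p.Protocol, p.InUse)
+//	}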
+
+// readSockstat opens and parses a NetSockstat from the input file.
+func readSockstat(name string) (*NetSockstat, error) {
+	// This file is small and can be read with one syscall.
+	b, err := util.ReadFileNoStat(name)
+	if err != nil {
+		// Do not wrap this error so the caller can detect os.IsNotExist and
+		// similar conditions.
+		return nil, err
+	}
+
+	stat, err := parseSockstat(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
+	}
+
+	return stat, nil
+}
+
+// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
+func parseSockstat(r io.Reader) (*NetSockstat, error) {
+	var stat NetSockstat
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Expect a minimum of a protocol and one key/value pair.
+		fields := strings.Split(s.Text(), " ")
+		if len(fields) < 3 {
+			return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
+		}
+
+		// The remaining fields are key/value pairs.
+		kvs, err := parseSockstatKVs(fields[1:])
+		if err != nil {
+			return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
+		}
+
+		// The first field is the protocol. We must trim its colon suffix.
+		proto := strings.TrimSuffix(fields[0], ":")
+		switch proto {
+		case "sockets":
+			// Special case: IPv4 has a sockets "used" key/value pair that we
+			// embed at the top level of the structure.
+			used := kvs["used"]
+			stat.Used = &used
+		default:
+			// Parse all other lines as individual protocols.
+			nsp := parseSockstatProtocol(kvs)
+			nsp.Protocol = proto
+			stat.Protocols = append(stat.Protocols, nsp)
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	return &stat, nil
+}
+
+// parseSockstatKVs parses a string slice into a map of key/value pairs.
+func parseSockstatKVs(kvs []string) (map[string]int, error) {
+	if len(kvs)%2 != 0 {
+		return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
+	}
+
+	// Iterate two values at a time to gather key/value pairs.
+	out := make(map[string]int, len(kvs)/2)
+	for i := 0; i < len(kvs); i += 2 {
+		vp := util.NewValueParser(kvs[i+1])
+		out[kvs[i]] = vp.Int()
+
+		if err := vp.Err(); err != nil {
+			return nil, err
+		}
+	}
+
+	return out, nil
+}
+
+// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
+func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
+	var nsp NetSockstatProtocol
+	for k, v := range kvs {
+		// Capture the range variable to ensure we get unique pointers for
+		// each of the optional fields.
+		v := v
+		switch k {
+		case "inuse":
+			nsp.InUse = v
+		case "orphan":
+			nsp.Orphan = &v
+		case "tw":
+			nsp.TW = &v
+		case "alloc":
+			nsp.Alloc = &v
+		case "mem":
+			nsp.Mem = &v
+		case "memory":
+			nsp.Memory = &v
+		}
+	}
+
+	return nsp
+}
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
new file mode 100644
index 0000000..71c8059
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// For the proc file format details,
+// See:
+// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
+type SoftnetStat struct {
+	// Number of processed packets.
+	Processed uint32
+	// Number of dropped packets.
+	Dropped uint32
+	// Number of times processing packets ran out of quota.
+	TimeSqueezed uint32
+	// Number of collisions encountered while obtaining the device lock during transmission.
+	CPUCollision uint32
+	// Number of times the CPU has been woken up to process received_rps packets.
+	ReceivedRps uint32
+	// Number of times the flow limit has been reached.
+	FlowLimitCount uint32
+	// Softnet backlog status.
+	SoftnetBacklogLen uint32
+	// CPU id owning this softnet_data.
+	Index uint32
+	// Width is the number of columns parsed from this softnet_data row.
+	Width int
+}
+
+var softNetProcFile = "net/softnet_stat"
+
+// NetSoftnetStat reads data from /proc/net/softnet_stat.
+func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
+	if err != nil {
+		return nil, err
+	}
+
+	entries, err := parseSoftnet(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
+	}
+
+	return entries, nil
+}
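+
+// A consumer-side sketch (comment only; fs assumed from procfs.NewFS). One
+// SoftnetStat entry is returned per CPU line of /proc/net/softnet_stat:
+//
+//	entries, err := fs.NetSoftnetStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, e := range entries {
+//		fmt.Printf("cpu %d: processed=%d dropped=%d\n", e.Index, e.Processed, e.Dropped)
+//	}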
+
+func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
+	const minColumns = 9
+
+	s := bufio.NewScanner(r)
+
+	var stats []SoftnetStat
+	cpuIndex := 0
+	for s.Scan() {
+		columns := strings.Fields(s.Text())
+		width := len(columns)
+		softnetStat := SoftnetStat{}
+
+		if width < minColumns {
+			return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
+		}
+
+		// Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+		if width >= minColumns {
+			us, err := parseHexUint32s(columns[0:9])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.Processed = us[0]
+			softnetStat.Dropped = us[1]
+			softnetStat.TimeSqueezed = us[2]
+			softnetStat.CPUCollision = us[8]
+		}
+
+		// Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+		if width >= 10 {
+			us, err := parseHexUint32s(columns[9:10])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.ReceivedRps = us[0]
+		}
+
+		// Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+		if width >= 11 {
+			us, err := parseHexUint32s(columns[10:11])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.FlowLimitCount = us[0]
+		}
+
+		// Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+		if width >= 13 {
+			us, err := parseHexUint32s(columns[11:13])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.SoftnetBacklogLen = us[0]
+			softnetStat.Index = us[1]
+		} else {
+			// For older kernels, create the Index based on the scan line number.
+			softnetStat.Index = uint32(cpuIndex)
+		}
+		softnetStat.Width = width
+		stats = append(stats, softnetStat)
+		cpuIndex++
+	}
+
+	return stats, nil
+}
+
+func parseHexUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 16, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
new file mode 100644
index 0000000..0396d72
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -0,0 +1,68 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+type (
+	// NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
+	NetTCP []*netIPSocketLine
+
+	// NetTCPSummary provides already computed values like the total queue lengths or
+	// the total number of used sockets. In contrast to NetTCP it does not collect
+	// the parsed lines into a slice.
+	NetTCPSummary NetIPSocketSummary
+)
+
+// NetTCP returns the IPv4 kernel/networking statistics for TCP sockets
+// read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
+func (fs FS) NetTCP() (NetTCP, error) {
+	return newNetTCP(fs.proc.Path("net/tcp"))
+}
+
+// NetTCP6 returns the IPv6 kernel/networking statistics for TCP sockets
+// read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
+func (fs FS) NetTCP6() (NetTCP, error) {
+	return newNetTCP(fs.proc.Path("net/tcp6"))
+}
+
+// NetTCPSummary returns already computed statistics like the total queue lengths
+// for TCP sockets read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
+func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
+	return newNetTCPSummary(fs.proc.Path("net/tcp"))
+}
+
+// NetTCP6Summary returns already computed statistics like the total queue lengths
+// for TCP sockets read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
+func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
+	return newNetTCPSummary(fs.proc.Path("net/tcp6"))
+}
+
+// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
+func newNetTCP(file string) (NetTCP, error) {
+	n, err := newNetIPSocket(file)
+	n1 := NetTCP(n)
+	return n1, err
+}
+
+func newNetTCPSummary(file string) (*NetTCPSummary, error) {
+	n, err := newNetIPSocketSummary(file)
+	if n == nil {
+		return nil, err
+	}
+	n1 := NetTCPSummary(*n)
+	return &n1, err
+}
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
new file mode 100644
index 0000000..13994c1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// TLSStat struct represents data in /proc/net/tls_stat.
+// See https://docs.kernel.org/networking/tls.html#statistics
+type TLSStat struct {
+	// number of TX sessions currently installed where host handles cryptography
+	TLSCurrTxSw int
+	// number of RX sessions currently installed where host handles cryptography
+	TLSCurrRxSw int
+	// number of TX sessions currently installed where NIC handles cryptography
+	TLSCurrTxDevice int
+	// number of RX sessions currently installed where NIC handles cryptography
+	TLSCurrRxDevice int
+	// number of TX sessions opened with host cryptography
+	TLSTxSw int
+	// number of RX sessions opened with host cryptography
+	TLSRxSw int
+	// number of TX sessions opened with NIC cryptography
+	TLSTxDevice int
+	// number of RX sessions opened with NIC cryptography
+	TLSRxDevice int
+	// record decryption failed (e.g. due to incorrect authentication tag)
+	TLSDecryptError int
+	// number of RX resyncs sent to NICs handling cryptography
+	TLSRxDeviceResync int
+	// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
+	TLSDecryptRetry int
+	// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
+	TLSRxNoPadViolation int
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func NewTLSStat() (TLSStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return TLSStat{}, err
+	}
+
+	return fs.NewTLSStat()
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func (fs FS) NewTLSStat() (TLSStat, error) {
+	file, err := os.Open(fs.proc.Path("net/tls_stat"))
+	if err != nil {
+		return TLSStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		tlsstat = TLSStat{}
+		s       = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return TLSStat{}, err
+		}
+
+		switch name {
+		case "TlsCurrTxSw":
+			tlsstat.TLSCurrTxSw = value
+		case "TlsCurrRxSw":
+			tlsstat.TLSCurrRxSw = value
+		case "TlsCurrTxDevice":
+			tlsstat.TLSCurrTxDevice = value
+		case "TlsCurrRxDevice":
+			tlsstat.TLSCurrRxDevice = value
+		case "TlsTxSw":
+			tlsstat.TLSTxSw = value
+		case "TlsRxSw":
+			tlsstat.TLSRxSw = value
+		case "TlsTxDevice":
+			tlsstat.TLSTxDevice = value
+		case "TlsRxDevice":
+			tlsstat.TLSRxDevice = value
+		case "TlsDecryptError":
+			tlsstat.TLSDecryptError = value
+		case "TlsRxDeviceResync":
+			tlsstat.TLSRxDeviceResync = value
+		case "TlsDecryptRetry":
+			tlsstat.TLSDecryptRetry = value
+		case "TlsRxNoPadViolation":
+			tlsstat.TLSRxNoPadViolation = value
+		}
+
+	}
+
+	return tlsstat, s.Err()
+}
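+
+// A consumer-side sketch (comment only):
+//
+//	tls, err := procfs.NewTLSStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("kTLS TX sessions (software crypto): %d\n", tls.TLSCurrTxSw)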
diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go
new file mode 100644
index 0000000..9ac3daf
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_udp.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+type (
+	// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
+	NetUDP []*netIPSocketLine
+
+	// NetUDPSummary provides already computed values like the total queue lengths or
+	// the total number of used sockets. In contrast to NetUDP it does not collect
+	// the parsed lines into a slice.
+	NetUDPSummary NetIPSocketSummary
+)
+
+// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
+// read from /proc/net/udp.
+func (fs FS) NetUDP() (NetUDP, error) {
+	return newNetUDP(fs.proc.Path("net/udp"))
+}
+
+// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
+// read from /proc/net/udp6.
+func (fs FS) NetUDP6() (NetUDP, error) {
+	return newNetUDP(fs.proc.Path("net/udp6"))
+}
+
+// NetUDPSummary returns already computed statistics like the total queue lengths
+// for UDP datagrams read from /proc/net/udp.
+func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
+	return newNetUDPSummary(fs.proc.Path("net/udp"))
+}
+
+// NetUDP6Summary returns already computed statistics like the total queue lengths
+// for UDP datagrams read from /proc/net/udp6.
+func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
+	return newNetUDPSummary(fs.proc.Path("net/udp6"))
+}
+
+// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
+func newNetUDP(file string) (NetUDP, error) {
+	n, err := newNetIPSocket(file)
+	n1 := NetUDP(n)
+	return n1, err
+}
+
+func newNetUDPSummary(file string) (*NetUDPSummary, error) {
+	n, err := newNetIPSocketSummary(file)
+	if n == nil {
+		return nil, err
+	}
+	n1 := NetUDPSummary(*n)
+	return &n1, err
+}
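+
+// A consumer-side sketch (comment only; fs assumed from procfs.NewFS). The
+// summary variant avoids keeping one parsed line per socket in memory:
+//
+//	summary, err := fs.NetUDPSummary()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("udp sockets=%d rx-queue=%d", summary.UsedSockets, summary.RxQueueLength)
+//	if summary.Drops != nil {
+//		fmt.Printf(" drops=%d", *summary.Drops)
+//	}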
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
new file mode 100644
index 0000000..d7e0cac
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -0,0 +1,257 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// For the proc file format details,
+// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
+// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
+
+// Constants for the various /proc/net/unix enumerations.
+// TODO: match against x/sys/unix or similar?
+const (
+	netUnixTypeStream    = 1
+	netUnixTypeDgram     = 2
+	netUnixTypeSeqpacket = 5
+
+	netUnixFlagDefault = 0
+	netUnixFlagListen  = 1 << 16
+
+	netUnixStateUnconnected  = 1
+	netUnixStateConnecting   = 2
+	netUnixStateConnected    = 3
+	netUnixStateDisconnected = 4
+)
+
+// NetUNIXType is the type of the type field.
+type NetUNIXType uint64
+
+// NetUNIXFlags is the type of the flags field.
+type NetUNIXFlags uint64
+
+// NetUNIXState is the type of the state field.
+type NetUNIXState uint64
+
+// NetUNIXLine represents a line of /proc/net/unix.
+type NetUNIXLine struct {
+	KernelPtr string
+	RefCount  uint64
+	Protocol  uint64
+	Flags     NetUNIXFlags
+	Type      NetUNIXType
+	State     NetUNIXState
+	Inode     uint64
+	Path      string
+}
+
+// NetUNIX holds the data read from /proc/net/unix.
+type NetUNIX struct {
+	Rows []*NetUNIXLine
+}
+
+// NetUNIX returns data read from /proc/net/unix.
+func (fs FS) NetUNIX() (*NetUNIX, error) {
+	return readNetUNIX(fs.proc.Path("net/unix"))
+}
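+
+// A consumer-side sketch (comment only; fs assumed from procfs.NewFS):
+//
+//	nu, err := fs.NetUNIX()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, row := range nu.Rows {
+//		fmt.Printf("%s type=%s state=%s flags=%s\n", row.Path, row.Type, row.State, row.Flags)
+//	}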
+
+// readNetUNIX reads data in /proc/net/unix format from the specified file.
+func readNetUNIX(file string) (*NetUNIX, error) {
+	// This file could be quite large and a streaming read is desirable versus
+	// reading the entire contents at once.
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseNetUNIX(f)
+}
+
+// parseNetUNIX creates a NetUNIX structure from the incoming stream.
+func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
+	// Begin scanning by checking for the existence of Inode.
+	s := bufio.NewScanner(r)
+	s.Scan()
+
+	// According to the proc(5) man page the file does not contain an Inode field,
+	// but in practice it does. This code works for both cases.
+	hasInode := strings.Contains(s.Text(), "Inode")
+
+	// Expect a minimum number of fields, but Inode and Path are optional:
+	// Num       RefCount Protocol Flags    Type St Inode Path
+	minFields := 6
+	if hasInode {
+		minFields++
+	}
+
+	var nu NetUNIX
+	for s.Scan() {
+		line := s.Text()
+		item, err := nu.parseLine(line, hasInode, minFields)
+		if err != nil {
+			return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
+		}
+
+		nu.Rows = append(nu.Rows, item)
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
+	}
+
+	return &nu, nil
+}
+
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
+	fields := strings.Fields(line)
+
+	l := len(fields)
+	if l < minFields {
+		return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
+	}
+
+	// Field offsets are as follows:
+	// Num       RefCount Protocol Flags    Type St Inode Path
+
+	kernelPtr := strings.TrimSuffix(fields[0], ":")
+
+	users, err := u.parseUsers(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
+	}
+
+	flags, err := u.parseFlags(fields[3])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
+	}
+
+	typ, err := u.parseType(fields[4])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
+	}
+
+	state, err := u.parseState(fields[5])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
+	}
+
+	var inode uint64
+	if hasInode {
+		inode, err = u.parseInode(fields[6])
+		if err != nil {
+			return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
+		}
+	}
+
+	n := &NetUNIXLine{
+		KernelPtr: kernelPtr,
+		RefCount:  users,
+		Type:      typ,
+		Flags:     flags,
+		State:     state,
+		Inode:     inode,
+	}
+
+	// Path field is optional.
+	if l > minFields {
+		// Path occurs at either index 6 or 7 depending on whether inode is
+		// already present.
+		pathIdx := 7
+		if !hasInode {
+			pathIdx--
+		}
+
+		n.Path = fields[pathIdx]
+	}
+
+	return n, nil
+}
+
+func (u NetUNIX) parseUsers(s string) (uint64, error) {
+	return strconv.ParseUint(s, 16, 32)
+}
+
+func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
+	typ, err := strconv.ParseUint(s, 16, 16)
+	if err != nil {
+		return 0, err
+	}
+
+	return NetUNIXType(typ), nil
+}
+
+func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
+	flags, err := strconv.ParseUint(s, 16, 32)
+	if err != nil {
+		return 0, err
+	}
+
+	return NetUNIXFlags(flags), nil
+}
+
+func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
+	st, err := strconv.ParseInt(s, 16, 8)
+	if err != nil {
+		return 0, err
+	}
+
+	return NetUNIXState(st), nil
+}
+
+func (u NetUNIX) parseInode(s string) (uint64, error) {
+	return strconv.ParseUint(s, 10, 64)
+}
+
+func (t NetUNIXType) String() string {
+	switch t {
+	case netUnixTypeStream:
+		return "stream"
+	case netUnixTypeDgram:
+		return "dgram"
+	case netUnixTypeSeqpacket:
+		return "seqpacket"
+	}
+	return "unknown"
+}
+
+func (f NetUNIXFlags) String() string {
+	switch f {
+	case netUnixFlagListen:
+		return "listen"
+	default:
+		return "default"
+	}
+}
+
+func (s NetUNIXState) String() string {
+	switch s {
+	case netUnixStateUnconnected:
+		return "unconnected"
+	case netUnixStateConnecting:
+		return "connecting"
+	case netUnixStateConnected:
+		return "connected"
+	case netUnixStateDisconnected:
+		return "disconnected"
+	}
+	return "unknown"
+}
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
new file mode 100644
index 0000000..7c597bc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+	Name string
+
+	// Status is the current 4-digit hex value status of the interface.
+	Status uint64
+
+	// QualityLink is the link quality.
+	QualityLink int
+
+	// QualityLevel is the signal gain (dBm).
+	QualityLevel int
+
+	// QualityNoise is the signal noise baseline (dBm).
+	QualityNoise int
+
+	// DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+	DiscardedNwid int
+
+	// DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+	DiscardedCrypt int
+
+	// DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+	DiscardedFrag int
+
+	// DiscardedRetry is the number of discarded packets that reached max MAC retries.
+	DiscardedRetry int
+
+	// DiscardedMisc is the number of discarded packets for other reasons.
+	DiscardedMisc int
+
+	// MissedBeacon is the number of missed beacons/superframe.
+	MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
+func (fs FS) Wireless() ([]*Wireless, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+	if err != nil {
+		return nil, err
+	}
+
+	m, err := parseWireless(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
+	}
+
+	return m, nil
+}
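+
+// A consumer-side sketch (comment only; fs assumed from procfs.NewFS):
+//
+//	ifaces, err := fs.Wireless()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, w := range ifaces {
+//		fmt.Printf("%s: link=%d level=%d dBm noise=%d dBm\n", w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise)
+//	}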
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
+face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
+ eth1: 0000    5.  -256.  -10.       0      1      0     3      0        0
+ eth2: 0000    5.  -256.  -20.       0      2      0     4      0        0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+	var (
+		interfaces []*Wireless
+		scanner    = bufio.NewScanner(r)
+	)
+
+	for n := 0; scanner.Scan(); n++ {
+		// Skip the 2 header lines.
+		if n < 2 {
+			continue
+		}
+
+		line := scanner.Text()
+
+		parts := strings.Split(line, ":")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
+		}
+
+		name := strings.TrimSpace(parts[0])
+		stats := strings.Fields(parts[1])
+
+		if len(stats) < 10 {
+			return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
+		}
+
+		status, err := strconv.ParseUint(stats[0], 16, 16)
+		if err != nil {
+			return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
+		}
+
+		qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+		}
+
+		qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+		}
+
+		qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+		}
+
+		dnwid, err := strconv.Atoi(stats[4])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+		}
+
+		dcrypt, err := strconv.Atoi(stats[5])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+		}
+
+		dfrag, err := strconv.Atoi(stats[6])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+		}
+
+		dretry, err := strconv.Atoi(stats[7])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+		}
+
+		dmisc, err := strconv.Atoi(stats[8])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+		}
+
+		mbeacon, err := strconv.Atoi(stats[9])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+		}
+
+		w := &Wireless{
+			Name:           name,
+			Status:         status,
+			QualityLink:    qlink,
+			QualityLevel:   qlevel,
+			QualityNoise:   qnoise,
+			DiscardedNwid:  dnwid,
+			DiscardedCrypt: dcrypt,
+			DiscardedFrag:  dfrag,
+			DiscardedRetry: dretry,
+			DiscardedMisc:  dmisc,
+			MissedBeacon:   mbeacon,
+		}
+
+		interfaces = append(interfaces, w)
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+	}
+
+	return interfaces, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
new file mode 100644
index 0000000..932ef20
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -0,0 +1,189 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+	// All errors which are not matched by others
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header Error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA Key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. Inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. Inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy Error
+	XfrmOutPolError int
+	// Forward routing of a packet is not allowed
+	XfrmFwdHdrError int
+	// State is invalid, perhaps expired
+	XfrmOutStateInvalid int
+	// State hasn’t been fully acquired before use
+	XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+	file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
+	if err != nil {
+		return XfrmStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		x = XfrmStat{}
+		s = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return XfrmStat{}, err
+		}
+
+		switch name {
+		case "XfrmInError":
+			x.XfrmInError = value
+		case "XfrmInBufferError":
+			x.XfrmInBufferError = value
+		case "XfrmInHdrError":
+			x.XfrmInHdrError = value
+		case "XfrmInNoStates":
+			x.XfrmInNoStates = value
+		case "XfrmInStateProtoError":
+			x.XfrmInStateProtoError = value
+		case "XfrmInStateModeError":
+			x.XfrmInStateModeError = value
+		case "XfrmInStateSeqError":
+			x.XfrmInStateSeqError = value
+		case "XfrmInStateExpired":
+			x.XfrmInStateExpired = value
+		case "XfrmInStateInvalid":
+			x.XfrmInStateInvalid = value
+		case "XfrmInTmplMismatch":
+			x.XfrmInTmplMismatch = value
+		case "XfrmInNoPols":
+			x.XfrmInNoPols = value
+		case "XfrmInPolBlock":
+			x.XfrmInPolBlock = value
+		case "XfrmInPolError":
+			x.XfrmInPolError = value
+		case "XfrmOutError":
+			x.XfrmOutError = value
+		case "XfrmInStateMismatch":
+			x.XfrmInStateMismatch = value
+		case "XfrmOutBundleGenError":
+			x.XfrmOutBundleGenError = value
+		case "XfrmOutBundleCheckError":
+			x.XfrmOutBundleCheckError = value
+		case "XfrmOutNoStates":
+			x.XfrmOutNoStates = value
+		case "XfrmOutStateProtoError":
+			x.XfrmOutStateProtoError = value
+		case "XfrmOutStateModeError":
+			x.XfrmOutStateModeError = value
+		case "XfrmOutStateSeqError":
+			x.XfrmOutStateSeqError = value
+		case "XfrmOutStateExpired":
+			x.XfrmOutStateExpired = value
+		case "XfrmOutPolBlock":
+			x.XfrmOutPolBlock = value
+		case "XfrmOutPolDead":
+			x.XfrmOutPolDead = value
+		case "XfrmOutPolError":
+			x.XfrmOutPolError = value
+		case "XfrmFwdHdrError":
+			x.XfrmFwdHdrError = value
+		case "XfrmOutStateInvalid":
+			x.XfrmOutStateInvalid = value
+		case "XfrmAcquireError":
+			x.XfrmAcquireError = value
+		}
+
+	}
+
+	return x, s.Err()
+}
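+
+// A consumer-side sketch (comment only):
+//
+//	x, err := procfs.NewXfrmStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("XfrmInError=%d XfrmOutError=%d\n", x.XfrmInError, x.XfrmOutError)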
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
new file mode 100644
index 0000000..742dff4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -0,0 +1,82 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+// NetStat contains statistics for all the counters from one file.
+type NetStat struct {
+	Stats    map[string][]uint64
+	Filename string
+}
+
+// NetStat retrieves stats from `/proc/net/stat/`.
+func (fs FS) NetStat() ([]NetStat, error) {
+	statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
+	if err != nil {
+		return nil, err
+	}
+
+	var netStatsTotal []NetStat
+
+	for _, filePath := range statFiles {
+		procNetstat, err := parseNetstat(filePath)
+		if err != nil {
+			return nil, err
+		}
+		procNetstat.Filename = filepath.Base(filePath)
+
+		netStatsTotal = append(netStatsTotal, procNetstat)
+	}
+	return netStatsTotal, nil
+}
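+
+// A consumer-side sketch (comment only; fs assumed from procfs.NewFS). Each
+// entry corresponds to one file under /proc/net/stat/, with one value per CPU
+// for every counter:
+//
+//	stats, err := fs.NetStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, s := range stats {
+//		fmt.Printf("%s: %d counters\n", s.Filename, len(s.Stats))
+//	}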
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(filePath string) (NetStat, error) {
+	netStat := NetStat{
+		Stats: make(map[string][]uint64),
+	}
+	file, err := os.Open(filePath)
+	if err != nil {
+		return netStat, err
+	}
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+	scanner.Scan()
+
+	// The first line is always a header listing the counter names
+	var headers []string
+	headers = append(headers, strings.Fields(scanner.Text())...)
+
+	// Each subsequent line holds the per-CPU values for those counters
+	for scanner.Scan() {
+		for num, counter := range strings.Fields(scanner.Text()) {
+			value, err := strconv.ParseUint(counter, 16, 64)
+			if err != nil {
+				return NetStat{}, err
+			}
+			netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
+		}
+	}
+
+	return netStat, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000..368187f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,338 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+	// The process ID.
+	PID int
+
+	fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+var (
+	ErrFileParse  = errors.New("error parsing file")
+	ErrFileRead   = errors.New("error reading file")
+	ErrMountPoint = errors.New("error accessing mount point")
+)
+
+func (p Procs) Len() int           { return len(p) }
+func (p Procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil || errors.Unwrap(err) == ErrMountPoint {
+		return Proc{}, err
+	}
+	return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.Proc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Procs{}, err
+	}
+	return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+	p, err := os.Readlink(fs.proc.Path("self"))
+	if err != nil {
+		return Proc{}, err
+	}
+	pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.Proc(pid)
+}
+
+// NewProc returns a process for the given pid.
+//
+// Deprecated: Use fs.Proc() instead.
+func (fs FS) NewProc(pid int) (Proc, error) {
+	return fs.Proc(pid)
+}
+
+// Proc returns a process for the given pid.
+func (fs FS) Proc(pid int) (Proc, error) {
+	if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+	d, err := os.Open(fs.proc.Path())
+	if err != nil {
+		return Procs{}, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
+	}
+
+	p := Procs{}
+	for _, n := range names {
+		pid, err := strconv.ParseInt(n, 10, 64)
+		if err != nil {
+			continue
+		}
+		p = append(p, Proc{PID: int(pid), fs: fs})
+	}
+
+	return p, nil
+}
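+
+// A consumer-side sketch (comment only; fs assumed from procfs.NewFS):
+//
+//	procs, err := fs.AllProcs()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, p := range procs {
+//		comm, _ := p.Comm()
+//		fmt.Printf("%d %s\n", p.PID, comm)
+//	}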
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+	data, err := util.ReadFileNoStat(p.path("cmdline"))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(data) < 1 {
+		return []string{}, nil
+	}
+
+	return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
+}
+
+// Wchan returns the wchan (wait channel) of a process.
+func (p Proc) Wchan() (string, error) {
+	f, err := os.Open(p.path("wchan"))
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	data, err := io.ReadAll(f)
+	if err != nil {
+		return "", err
+	}
+
+	wchan := string(data)
+	if wchan == "" || wchan == "0" {
+		return "", nil
+	}
+
+	return wchan, nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+	data, err := util.ReadFileNoStat(p.path("comm"))
+	if err != nil {
+		return "", err
+	}
+
+	return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+	exe, err := os.Readlink(p.path("exe"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return exe, err
+}
+
+// Cwd returns the absolute path to the current working directory of the process.
+func (p Proc) Cwd() (string, error) {
+	wd, err := os.Readlink(p.path("cwd"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
+func (p Proc) RootDir() (string, error) {
+	rdir, err := os.Readlink(p.path("root"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return rdir, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	fds := make([]uintptr, len(names))
+	for i, n := range names {
+		fd, err := strconv.ParseInt(n, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
+		}
+		fds[i] = uintptr(fd)
+	}
+
+	return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	targets := make([]string, len(names))
+
+	for i, name := range names {
+		target, err := os.Readlink(p.path("fd", name))
+		if err == nil {
+			targets[i] = target
+		}
+	}
+
+	return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+	// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+	if p.fs.isReal {
+		stat, err := os.Stat(p.path("fd"))
+		if err != nil {
+			return 0, err
+		}
+
+		size := stat.Size()
+		if size > 0 {
+			return int(size), nil
+		}
+	}
+
+	fds, err := p.fileDescriptors()
+	if err != nil {
+		return 0, err
+	}
+
+	return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+	f, err := os.Open(p.path("mountstats"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseMountStats(f)
+}
+
+// MountInfo retrieves mount information for mount points in a
+// process's namespace.
+// It supplies information missing in `/proc/self/mounts` and
+// fixes various other problems with that file too.
+func (p Proc) MountInfo() ([]*MountInfo, error) {
+	data, err := util.ReadFileNoStat(p.path("mountinfo"))
+	if err != nil {
+		return nil, err
+	}
+	return parseMountInfo(data)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+	d, err := os.Open(p.path("fd"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
+	}
+
+	return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+	return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
+
+// FileDescriptorsInfo retrieves information about all file descriptors of
+// the process.
+func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	var fdinfos ProcFDInfos
+
+	for _, n := range names {
+		fdinfo, err := p.FDInfo(n)
+		if err != nil {
+			continue
+		}
+		fdinfos = append(fdinfos, *fdinfo)
+	}
+
+	return fdinfos, nil
+}
+
+// Schedstat returns task scheduling information for the process.
+func (p Proc) Schedstat() (ProcSchedstat, error) {
+	contents, err := os.ReadFile(p.path("schedstat"))
+	if err != nil {
+		return ProcSchedstat{}, err
+	}
+	return parseProcSchedstat(string(contents))
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
new file mode 100644
index 0000000..4a64347
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -0,0 +1,98 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
+// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
+// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
+// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
+// in this hierarchy.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type Cgroup struct {
+	// HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
+	// hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
+	HierarchyID int
+	// Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
+	// Cgroups V2 this may be empty, as all active controllers use the same hierarchy
+	Controllers []string
+	// Path of this control group, relative to the mount point of the cgroupfs representing this specific
+	// hierarchy
+	Path string
+}
+
+// parseCgroupString parses a single line of the /proc/[pid]/cgroup file.
+// Line format is hierarchyID:[controller1,controller2]:path.
+func parseCgroupString(cgroupStr string) (*Cgroup, error) {
+	var err error
+
+	fields := strings.SplitN(cgroupStr, ":", 3)
+	if len(fields) < 3 {
+		return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
+	}
+
+	cgroup := &Cgroup{
+		Path:        fields[2],
+		Controllers: nil,
+	}
+	cgroup.HierarchyID, err = strconv.Atoi(fields[0])
+	if err != nil {
+		return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
+	}
+	if fields[1] != "" {
+		ssNames := strings.Split(fields[1], ",")
+		cgroup.Controllers = append(cgroup.Controllers, ssNames...)
+	}
+	return cgroup, nil
+}
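+
+// For illustration (a sketch, not used by this file): a cgroup v1 line such as
+// "5:cpuacct,cpu,cpuset:/daemons" parses into
+//
+//	Cgroup{HierarchyID: 5, Controllers: []string{"cpuacct", "cpu", "cpuset"}, Path: "/daemons"}
+//
+// while a v2 line such as "0::/user.slice" yields HierarchyID 0, no Controllers and Path "/user.slice".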
+
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
+func parseCgroups(data []byte) ([]Cgroup, error) {
+	var cgroups []Cgroup
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		mountString := scanner.Text()
+		parsedMounts, err := parseCgroupString(mountString)
+		if err != nil {
+			return nil, err
+		}
+		cgroups = append(cgroups, *parsedMounts)
+	}
+
+	err := scanner.Err()
+	return cgroups, err
+}
+
+// Cgroups reads from /proc/<pid>/cgroup and returns a []Cgroup locating this PID in each process
+// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
+// so the length of the returned slice is equal to the number of active hierarchies on this system.
+func (p Proc) Cgroups() ([]Cgroup, error) {
+	data, err := util.ReadFileNoStat(p.path("cgroup"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCgroups(data)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
new file mode 100644
index 0000000..5dd4938
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -0,0 +1,98 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CgroupSummary models one line from /proc/cgroups.
+// This file contains information about the controllers that are compiled into the kernel.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type CgroupSummary struct {
+	// The name of the controller. controller is also known as subsystem.
+	SubsysName string
+	// The unique ID of the cgroup hierarchy on which this controller is mounted.
+	Hierarchy int
+	// The number of control groups in this hierarchy using this controller.
+	Cgroups int
+	// This field contains the value 1 if this controller is enabled, or 0 if it has been disabled
+	Enabled int
+}
+
+// parseCgroupSummaryString parses a single line of the /proc/cgroups file.
+// Line format is `subsys_name	hierarchy	num_cgroups	enabled`.
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
+	var err error
+
+	fields := strings.Fields(CgroupSummaryStr)
+	// require at least 4 fields
+	if len(fields) < 4 {
+		return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
+	}
+
+	CgroupSummary := &CgroupSummary{
+		SubsysName: fields[0],
+	}
+	CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
+	}
+	CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
+	}
+	CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
+	}
+	return CgroupSummary, nil
+}
+
+// parseCgroupSummary reads each line of the /proc/cgroups file.
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
+	var CgroupSummarys []CgroupSummary
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		CgroupSummaryString := scanner.Text()
+		// ignore comment lines
+		if strings.HasPrefix(CgroupSummaryString, "#") {
+			continue
+		}
+		CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
+		if err != nil {
+			return nil, err
+		}
+		CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
+	}
+
+	err := scanner.Err()
+	return CgroupSummarys, err
+}
+
+// CgroupSummarys returns information about current /proc/cgroups.
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCgroupSummary(data)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
new file mode 100644
index 0000000..57a8989
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_environ.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Environ reads the process's environment variables from `/proc/<pid>/environ`.
+func (p Proc) Environ() ([]string, error) {
+	environments := make([]string, 0)
+
+	data, err := util.ReadFileNoStat(p.path("environ"))
+	if err != nil {
+		return environments, err
+	}
+
+	environments = strings.Split(string(data), "\000")
+	if len(environments) > 0 {
+		environments = environments[:len(environments)-1]
+	}
+
+	return environments, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
new file mode 100644
index 0000000..fa761b3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -0,0 +1,138 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"regexp"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+var (
+	rPos          = regexp.MustCompile(`^pos:\s+(\d+)$`)
+	rFlags        = regexp.MustCompile(`^flags:\s+(\d+)$`)
+	rMntID        = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+	rIno          = regexp.MustCompile(`^ino:\s+(\d+)$`)
+	rInotify      = regexp.MustCompile(`^inotify`)
+	rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
+)
+
+// ProcFDInfo represents file descriptor information.
+type ProcFDInfo struct {
+	// File descriptor
+	FD string
+	// File offset
+	Pos string
+	// File access mode and status flags
+	Flags string
+	// Mount point ID
+	MntID string
+	// Inode number
+	Ino string
+	// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
+	InotifyInfos []InotifyInfo
+}
+
+// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
+func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
+	data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
+	if err != nil {
+		return nil, err
+	}
+
+	var text, pos, flags, mntid, ino string
+	var inotify []InotifyInfo
+
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		text = scanner.Text()
+		if rPos.MatchString(text) {
+			pos = rPos.FindStringSubmatch(text)[1]
+		} else if rFlags.MatchString(text) {
+			flags = rFlags.FindStringSubmatch(text)[1]
+		} else if rMntID.MatchString(text) {
+			mntid = rMntID.FindStringSubmatch(text)[1]
+		} else if rIno.MatchString(text) {
+			ino = rIno.FindStringSubmatch(text)[1]
+		} else if rInotify.MatchString(text) {
+			newInotify, err := parseInotifyInfo(text)
+			if err != nil {
+				return nil, err
+			}
+			inotify = append(inotify, *newInotify)
+		}
+	}
+
+	i := &ProcFDInfo{
+		FD:           fd,
+		Pos:          pos,
+		Flags:        flags,
+		MntID:        mntid,
+		Ino:          ino,
+		InotifyInfos: inotify,
+	}
+
+	return i, nil
+}
+
+// InotifyInfo represents a single inotify line in the fdinfo file.
+type InotifyInfo struct {
+	// Watch descriptor number
+	WD string
+	// Inode number
+	Ino string
+	// Device ID
+	Sdev string
+	// Mask of events being monitored
+	Mask string
+}
+
+// InotifyInfo constructor. Only available on kernel 3.8+.
+func parseInotifyInfo(line string) (*InotifyInfo, error) {
+	m := rInotifyParts.FindStringSubmatch(line)
+	if len(m) >= 4 {
+		var mask string
+		if len(m) == 5 {
+			mask = m[4]
+		}
+		i := &InotifyInfo{
+			WD:   m[1],
+			Ino:  m[2],
+			Sdev: m[3],
+			Mask: mask,
+		}
+		return i, nil
+	}
+	return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
+}
+
+// ProcFDInfos represents a list of ProcFDInfo structs.
+type ProcFDInfos []ProcFDInfo
+
+func (p ProcFDInfos) Len() int           { return len(p) }
+func (p ProcFDInfos) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
+
+// InotifyWatchLen returns the total number of inotify watches.
+func (p ProcFDInfos) InotifyWatchLen() (int, error) {
+	length := 0
+	for _, f := range p {
+		length += len(f.InotifyInfos)
+	}
+
+	return length, nil
+}
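+
+// Illustrative usage (a sketch of assumed caller code, with error handling elided),
+// given a Proc p (for example from procfs.Self()):
+//
+//	infos, _ := p.FileDescriptorsInfo()
+//	watches, _ := infos.InotifyWatchLen()
+//	fmt.Println("inotify watches:", watches)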
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 0000000..86b4b45
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+	// Info is the type of interrupt.
+	Info string
+	// Devices is the name of the device that is located at that IRQ
+	Devices string
+	// Values is the number of interrupts per CPU.
+	Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+	data, err := util.ReadFileNoStat(p.path("interrupts"))
+	if err != nil {
+		return nil, err
+	}
+	return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+	var (
+		interrupts = Interrupts{}
+		scanner    = bufio.NewScanner(r)
+	)
+
+	if !scanner.Scan() {
+		return nil, errors.New("interrupts empty")
+	}
+	cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+	for scanner.Scan() {
+		parts := strings.Fields(scanner.Text())
+		if len(parts) == 0 { // skip empty lines
+			continue
+		}
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
+		}
+		intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+		if len(parts) == 2 {
+			interrupts[intName] = Interrupt{
+				Info:    "",
+				Devices: "",
+				Values: []string{
+					parts[1],
+				},
+			}
+			continue
+		}
+
+		intr := Interrupt{
+			Values: parts[1 : cpuNum+1],
+		}
+
+		if _, err := strconv.Atoi(intName); err == nil { // numeric interrupt
+			intr.Info = parts[cpuNum+1]
+			intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+		} else {
+			intr.Info = strings.Join(parts[cpuNum+1:], " ")
+		}
+		interrupts[intName] = intr
+	}
+
+	return interrupts, scanner.Err()
+}
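+
+// Illustrative usage (a sketch of assumed caller code, with error handling elided),
+// given a Proc p (for example from procfs.Self()):
+//
+//	ints, _ := p.Interrupts()
+//	for irq, i := range ints {
+//		fmt.Println(irq, i.Info, i.Devices, i.Values)
+//	}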
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 0000000..d15b66d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// IO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) IO() (ProcIO, error) {
+	pio := ProcIO{}
+
+	data, err := util.ReadFileNoStat(p.path("io"))
+	if err != nil {
+		return pio, err
+	}
+
+	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+		"read_bytes: %d\nwrite_bytes: %d\n" +
+		"cancelled_write_bytes: %d\n" //nolint:misspell
+
+	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+	return pio, err
+}
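+
+// Illustrative usage (a sketch of assumed caller code, with error handling elided),
+// given a Proc p (for example from procfs.Self()):
+//
+//	pio, _ := p.IO()
+//	fmt.Printf("read_bytes=%d write_bytes=%d\n", pio.ReadBytes, pio.WriteBytes)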
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..9530b14
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,160 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+	// CPU time limit in seconds.
+	CPUTime uint64
+	// Maximum size of files that the process may create.
+	FileSize uint64
+	// Maximum size of the process's data segment (initialized data,
+	// uninitialized data, and heap).
+	DataSize uint64
+	// Maximum size of the process stack in bytes.
+	StackSize uint64
+	// Maximum size of a core file.
+	CoreFileSize uint64
+	// Limit of the process's resident set in pages.
+	ResidentSet uint64
+	// Maximum number of processes that can be created for the real user ID of
+	// the calling process.
+	Processes uint64
+	// Value one greater than the maximum file descriptor number that can be
+	// opened by this process.
+	OpenFiles uint64
+	// Maximum number of bytes of memory that may be locked into RAM.
+	LockedMemory uint64
+	// Maximum size of the process's virtual memory address space in bytes.
+	AddressSpace uint64
+	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
+	// this process may establish.
+	FileLocks uint64
+	// Limit of signals that may be queued for the real user ID of the calling
+	// process.
+	PendingSignals uint64
+	// Limit on the number of bytes that can be allocated for POSIX message
+	// queues for the real user ID of the calling process.
+	MsqqueueSize uint64
+	// Limit of the nice priority set using setpriority(2) or nice(2).
+	NicePriority uint64
+	// Limit of the real-time priority set using sched_setscheduler(2) or
+	// sched_setparam(2).
+	RealtimePriority uint64
+	// Limit (in microseconds) on the amount of CPU time that a process
+	// scheduled under a real-time scheduling policy may consume without making
+	// a blocking system call.
+	RealtimeTimeout uint64
+}
+
+const (
+	limitsFields    = 4
+	limitsUnlimited = "unlimited"
+)
+
+var (
+	limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
+)
+
+// NewLimits returns the current soft limits of the process.
+//
+// Deprecated: Use p.Limits() instead.
+func (p Proc) NewLimits() (ProcLimits, error) {
+	return p.Limits()
+}
+
+// Limits returns the current soft limits of the process.
+func (p Proc) Limits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+
+	s.Scan() // Skip limits header
+
+	for s.Scan() {
+		//fields := limitsMatch.Split(s.Text(), limitsFields)
+		fields := limitsMatch.FindStringSubmatch(s.Text())
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
+		}
+
+		switch fields[1] {
+		case "Max cpu time":
+			l.CPUTime, err = parseUint(fields[2])
+		case "Max file size":
+			l.FileSize, err = parseUint(fields[2])
+		case "Max data size":
+			l.DataSize, err = parseUint(fields[2])
+		case "Max stack size":
+			l.StackSize, err = parseUint(fields[2])
+		case "Max core file size":
+			l.CoreFileSize, err = parseUint(fields[2])
+		case "Max resident set":
+			l.ResidentSet, err = parseUint(fields[2])
+		case "Max processes":
+			l.Processes, err = parseUint(fields[2])
+		case "Max open files":
+			l.OpenFiles, err = parseUint(fields[2])
+		case "Max locked memory":
+			l.LockedMemory, err = parseUint(fields[2])
+		case "Max address space":
+			l.AddressSpace, err = parseUint(fields[2])
+		case "Max file locks":
+			l.FileLocks, err = parseUint(fields[2])
+		case "Max pending signals":
+			l.PendingSignals, err = parseUint(fields[2])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseUint(fields[2])
+		case "Max nice priority":
+			l.NicePriority, err = parseUint(fields[2])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseUint(fields[2])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseUint(fields[2])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseUint(s string) (uint64, error) {
+	if s == limitsUnlimited {
+		return 18446744073709551615, nil
+	}
+	i, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
+	}
+	return i, nil
+}
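+
+// Illustrative usage (a sketch of assumed caller code, with error handling elided),
+// given a Proc p. Note that "unlimited" is reported as the maximum uint64 value
+// (18446744073709551615):
+//
+//	limits, _ := p.Limits()
+//	fmt.Println("max open files:", limits.OpenFiles)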
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
new file mode 100644
index 0000000..7e75c28
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -0,0 +1,211 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build !js
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
+type ProcMapPermissions struct {
+	// mapping has the [R]ead flag set
+	Read bool
+	// mapping has the [W]rite flag set
+	Write bool
+	// mapping has the [X]ecutable flag set
+	Execute bool
+	// mapping has the [S]hared flag set
+	Shared bool
+	// mapping is marked as [P]rivate (copy on write)
+	Private bool
+}
+
+// ProcMap contains the process memory-mappings of the process
+// read from `/proc/[pid]/maps`.
+type ProcMap struct {
+	// The start address of current mapping.
+	StartAddr uintptr
+	// The end address of the current mapping
+	EndAddr uintptr
+	// The permissions for this mapping
+	Perms *ProcMapPermissions
+	// The current offset into the file/fd (e.g., shared libs)
+	Offset int64
+	// Device owner of this mapping (major:minor) in Mkdev format.
+	Dev uint64
+	// The inode of the device above
+	Inode uint64
+	// The file or pseudofile (or empty==anonymous)
+	Pathname string
+}
+
+// parseDevice parses the device token of a line and converts it to a dev_t
+// (mkdev) like structure.
+func parseDevice(s string) (uint64, error) {
+	i := strings.Index(s, ":")
+	if i == -1 {
+		return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
+	}
+
+	major, err := strconv.ParseUint(s[0:i], 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	minor, err := strconv.ParseUint(s[i+1:], 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	return unix.Mkdev(uint32(major), uint32(minor)), nil
+}
+
+// parseAddress converts a hex-string to a uintptr.
+func parseAddress(s string) (uintptr, error) {
+	a, err := strconv.ParseUint(s, 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	return uintptr(a), nil
+}
+
+// parseAddresses parses the start-end address.
+func parseAddresses(s string) (uintptr, uintptr, error) {
+	idx := strings.Index(s, "-")
+	if idx == -1 {
+		return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
+	}
+
+	saddr, err := parseAddress(s[0:idx])
+	if err != nil {
+		return 0, 0, err
+	}
+
+	eaddr, err := parseAddress(s[idx+1:])
+	if err != nil {
+		return 0, 0, err
+	}
+
+	return saddr, eaddr, nil
+}
+
+// parsePermissions parses a token and returns any that are set.
+func parsePermissions(s string) (*ProcMapPermissions, error) {
+	if len(s) < 4 {
+		return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
+	}
+
+	perms := ProcMapPermissions{}
+	for _, ch := range s {
+		switch ch {
+		case 'r':
+			perms.Read = true
+		case 'w':
+			perms.Write = true
+		case 'x':
+			perms.Execute = true
+		case 'p':
+			perms.Private = true
+		case 's':
+			perms.Shared = true
+		}
+	}
+
+	return &perms, nil
+}
+
+// parseProcMap will attempt to parse a single line within a /proc/[pid]/maps
+// buffer.
+func parseProcMap(text string) (*ProcMap, error) {
+	fields := strings.Fields(text)
+	if len(fields) < 5 {
+		return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
+	}
+
+	saddr, eaddr, err := parseAddresses(fields[0])
+	if err != nil {
+		return nil, err
+	}
+
+	perms, err := parsePermissions(fields[1])
+	if err != nil {
+		return nil, err
+	}
+
+	offset, err := strconv.ParseInt(fields[2], 16, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	device, err := parseDevice(fields[3])
+	if err != nil {
+		return nil, err
+	}
+
+	inode, err := strconv.ParseUint(fields[4], 10, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	pathname := ""
+
+	if len(fields) >= 5 {
+		pathname = strings.Join(fields[5:], " ")
+	}
+
+	return &ProcMap{
+		StartAddr: saddr,
+		EndAddr:   eaddr,
+		Perms:     perms,
+		Offset:    offset,
+		Dev:       device,
+		Inode:     inode,
+		Pathname:  pathname,
+	}, nil
+}
+
+// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
+// process.
+func (p Proc) ProcMaps() ([]*ProcMap, error) {
+	file, err := os.Open(p.path("maps"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	maps := []*ProcMap{}
+	scan := bufio.NewScanner(file)
+
+	for scan.Scan() {
+		m, err := parseProcMap(scan.Text())
+		if err != nil {
+			return nil, err
+		}
+
+		maps = append(maps, m)
+	}
+
+	return maps, nil
+}
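+
+// Illustrative usage (a sketch of assumed caller code, with error handling elided),
+// given a Proc p:
+//
+//	maps, _ := p.ProcMaps()
+//	for _, m := range maps {
+//		fmt.Printf("%x-%x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
+//	}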
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
new file mode 100644
index 0000000..4248c17
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -0,0 +1,443 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcNetstat models the content of /proc/<pid>/net/netstat.
+type ProcNetstat struct {
+	// The process ID.
+	PID int
+	TcpExt
+	IpExt
+}
+
+type TcpExt struct { // nolint:revive
+	SyncookiesSent            *float64
+	SyncookiesRecv            *float64
+	SyncookiesFailed          *float64
+	EmbryonicRsts             *float64
+	PruneCalled               *float64
+	RcvPruned                 *float64
+	OfoPruned                 *float64
+	OutOfWindowIcmps          *float64
+	LockDroppedIcmps          *float64
+	ArpFilter                 *float64
+	TW                        *float64
+	TWRecycled                *float64
+	TWKilled                  *float64
+	PAWSActive                *float64
+	PAWSEstab                 *float64
+	DelayedACKs               *float64
+	DelayedACKLocked          *float64
+	DelayedACKLost            *float64
+	ListenOverflows           *float64
+	ListenDrops               *float64
+	TCPHPHits                 *float64
+	TCPPureAcks               *float64
+	TCPHPAcks                 *float64
+	TCPRenoRecovery           *float64
+	TCPSackRecovery           *float64
+	TCPSACKReneging           *float64
+	TCPSACKReorder            *float64
+	TCPRenoReorder            *float64
+	TCPTSReorder              *float64
+	TCPFullUndo               *float64
+	TCPPartialUndo            *float64
+	TCPDSACKUndo              *float64
+	TCPLossUndo               *float64
+	TCPLostRetransmit         *float64
+	TCPRenoFailures           *float64
+	TCPSackFailures           *float64
+	TCPLossFailures           *float64
+	TCPFastRetrans            *float64
+	TCPSlowStartRetrans       *float64
+	TCPTimeouts               *float64
+	TCPLossProbes             *float64
+	TCPLossProbeRecovery      *float64
+	TCPRenoRecoveryFail       *float64
+	TCPSackRecoveryFail       *float64
+	TCPRcvCollapsed           *float64
+	TCPDSACKOldSent           *float64
+	TCPDSACKOfoSent           *float64
+	TCPDSACKRecv              *float64
+	TCPDSACKOfoRecv           *float64
+	TCPAbortOnData            *float64
+	TCPAbortOnClose           *float64
+	TCPAbortOnMemory          *float64
+	TCPAbortOnTimeout         *float64
+	TCPAbortOnLinger          *float64
+	TCPAbortFailed            *float64
+	TCPMemoryPressures        *float64
+	TCPMemoryPressuresChrono  *float64
+	TCPSACKDiscard            *float64
+	TCPDSACKIgnoredOld        *float64
+	TCPDSACKIgnoredNoUndo     *float64
+	TCPSpuriousRTOs           *float64
+	TCPMD5NotFound            *float64
+	TCPMD5Unexpected          *float64
+	TCPMD5Failure             *float64
+	TCPSackShifted            *float64
+	TCPSackMerged             *float64
+	TCPSackShiftFallback      *float64
+	TCPBacklogDrop            *float64
+	PFMemallocDrop            *float64
+	TCPMinTTLDrop             *float64
+	TCPDeferAcceptDrop        *float64
+	IPReversePathFilter       *float64
+	TCPTimeWaitOverflow       *float64
+	TCPReqQFullDoCookies      *float64
+	TCPReqQFullDrop           *float64
+	TCPRetransFail            *float64
+	TCPRcvCoalesce            *float64
+	TCPRcvQDrop               *float64
+	TCPOFOQueue               *float64
+	TCPOFODrop                *float64
+	TCPOFOMerge               *float64
+	TCPChallengeACK           *float64
+	TCPSYNChallenge           *float64
+	TCPFastOpenActive         *float64
+	TCPFastOpenActiveFail     *float64
+	TCPFastOpenPassive        *float64
+	TCPFastOpenPassiveFail    *float64
+	TCPFastOpenListenOverflow *float64
+	TCPFastOpenCookieReqd     *float64
+	TCPFastOpenBlackhole      *float64
+	TCPSpuriousRtxHostQueues  *float64
+	BusyPollRxPackets         *float64
+	TCPAutoCorking            *float64
+	TCPFromZeroWindowAdv      *float64
+	TCPToZeroWindowAdv        *float64
+	TCPWantZeroWindowAdv      *float64
+	TCPSynRetrans             *float64
+	TCPOrigDataSent           *float64
+	TCPHystartTrainDetect     *float64
+	TCPHystartTrainCwnd       *float64
+	TCPHystartDelayDetect     *float64
+	TCPHystartDelayCwnd       *float64
+	TCPACKSkippedSynRecv      *float64
+	TCPACKSkippedPAWS         *float64
+	TCPACKSkippedSeq          *float64
+	TCPACKSkippedFinWait2     *float64
+	TCPACKSkippedTimeWait     *float64
+	TCPACKSkippedChallenge    *float64
+	TCPWinProbe               *float64
+	TCPKeepAlive              *float64
+	TCPMTUPFail               *float64
+	TCPMTUPSuccess            *float64
+	TCPWqueueTooBig           *float64
+}
+
+type IpExt struct { // nolint:revive
+	InNoRoutes      *float64
+	InTruncatedPkts *float64
+	InMcastPkts     *float64
+	OutMcastPkts    *float64
+	InBcastPkts     *float64
+	OutBcastPkts    *float64
+	InOctets        *float64
+	OutOctets       *float64
+	InMcastOctets   *float64
+	OutMcastOctets  *float64
+	InBcastOctets   *float64
+	OutBcastOctets  *float64
+	InCsumErrors    *float64
+	InNoECTPkts     *float64
+	InECT1Pkts      *float64
+	InECT0Pkts      *float64
+	InCEPkts        *float64
+	ReasmOverlaps   *float64
+}
+
+func (p Proc) Netstat() (ProcNetstat, error) {
+	filename := p.path("net/netstat")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		return ProcNetstat{PID: p.PID}, err
+	}
+	procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
+	procNetstat.PID = p.PID
+	return procNetstat, err
+}
+
+// parseProcNetstat parses the metrics from proc/<pid>/net/netstat file
+// and returns a ProcNetstat structure.
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+	var (
+		scanner     = bufio.NewScanner(r)
+		procNetstat = ProcNetstat{}
+	)
+
+	for scanner.Scan() {
+		nameParts := strings.Split(scanner.Text(), " ")
+		scanner.Scan()
+		valueParts := strings.Split(scanner.Text(), " ")
+		// Remove trailing :.
+		protocol := strings.TrimSuffix(nameParts[0], ":")
+		if len(nameParts) != len(valueParts) {
+			return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
+				ErrFileParse, fileName, protocol)
+		}
+		for i := 1; i < len(nameParts); i++ {
+			value, err := strconv.ParseFloat(valueParts[i], 64)
+			if err != nil {
+				return procNetstat, err
+			}
+			key := nameParts[i]
+
+			switch protocol {
+			case "TcpExt":
+				switch key {
+				case "SyncookiesSent":
+					procNetstat.SyncookiesSent = &value
+				case "SyncookiesRecv":
+					procNetstat.SyncookiesRecv = &value
+				case "SyncookiesFailed":
+					procNetstat.SyncookiesFailed = &value
+				case "EmbryonicRsts":
+					procNetstat.EmbryonicRsts = &value
+				case "PruneCalled":
+					procNetstat.PruneCalled = &value
+				case "RcvPruned":
+					procNetstat.RcvPruned = &value
+				case "OfoPruned":
+					procNetstat.OfoPruned = &value
+				case "OutOfWindowIcmps":
+					procNetstat.OutOfWindowIcmps = &value
+				case "LockDroppedIcmps":
+					procNetstat.LockDroppedIcmps = &value
+				case "ArpFilter":
+					procNetstat.ArpFilter = &value
+				case "TW":
+					procNetstat.TW = &value
+				case "TWRecycled":
+					procNetstat.TWRecycled = &value
+				case "TWKilled":
+					procNetstat.TWKilled = &value
+				case "PAWSActive":
+					procNetstat.PAWSActive = &value
+				case "PAWSEstab":
+					procNetstat.PAWSEstab = &value
+				case "DelayedACKs":
+					procNetstat.DelayedACKs = &value
+				case "DelayedACKLocked":
+					procNetstat.DelayedACKLocked = &value
+				case "DelayedACKLost":
+					procNetstat.DelayedACKLost = &value
+				case "ListenOverflows":
+					procNetstat.ListenOverflows = &value
+				case "ListenDrops":
+					procNetstat.ListenDrops = &value
+				case "TCPHPHits":
+					procNetstat.TCPHPHits = &value
+				case "TCPPureAcks":
+					procNetstat.TCPPureAcks = &value
+				case "TCPHPAcks":
+					procNetstat.TCPHPAcks = &value
+				case "TCPRenoRecovery":
+					procNetstat.TCPRenoRecovery = &value
+				case "TCPSackRecovery":
+					procNetstat.TCPSackRecovery = &value
+				case "TCPSACKReneging":
+					procNetstat.TCPSACKReneging = &value
+				case "TCPSACKReorder":
+					procNetstat.TCPSACKReorder = &value
+				case "TCPRenoReorder":
+					procNetstat.TCPRenoReorder = &value
+				case "TCPTSReorder":
+					procNetstat.TCPTSReorder = &value
+				case "TCPFullUndo":
+					procNetstat.TCPFullUndo = &value
+				case "TCPPartialUndo":
+					procNetstat.TCPPartialUndo = &value
+				case "TCPDSACKUndo":
+					procNetstat.TCPDSACKUndo = &value
+				case "TCPLossUndo":
+					procNetstat.TCPLossUndo = &value
+				case "TCPLostRetransmit":
+					procNetstat.TCPLostRetransmit = &value
+				case "TCPRenoFailures":
+					procNetstat.TCPRenoFailures = &value
+				case "TCPSackFailures":
+					procNetstat.TCPSackFailures = &value
+				case "TCPLossFailures":
+					procNetstat.TCPLossFailures = &value
+				case "TCPFastRetrans":
+					procNetstat.TCPFastRetrans = &value
+				case "TCPSlowStartRetrans":
+					procNetstat.TCPSlowStartRetrans = &value
+				case "TCPTimeouts":
+					procNetstat.TCPTimeouts = &value
+				case "TCPLossProbes":
+					procNetstat.TCPLossProbes = &value
+				case "TCPLossProbeRecovery":
+					procNetstat.TCPLossProbeRecovery = &value
+				case "TCPRenoRecoveryFail":
+					procNetstat.TCPRenoRecoveryFail = &value
+				case "TCPSackRecoveryFail":
+					procNetstat.TCPSackRecoveryFail = &value
+				case "TCPRcvCollapsed":
+					procNetstat.TCPRcvCollapsed = &value
+				case "TCPDSACKOldSent":
+					procNetstat.TCPDSACKOldSent = &value
+				case "TCPDSACKOfoSent":
+					procNetstat.TCPDSACKOfoSent = &value
+				case "TCPDSACKRecv":
+					procNetstat.TCPDSACKRecv = &value
+				case "TCPDSACKOfoRecv":
+					procNetstat.TCPDSACKOfoRecv = &value
+				case "TCPAbortOnData":
+					procNetstat.TCPAbortOnData = &value
+				case "TCPAbortOnClose":
+					procNetstat.TCPAbortOnClose = &value
+				case "TCPDeferAcceptDrop":
+					procNetstat.TCPDeferAcceptDrop = &value
+				case "IPReversePathFilter":
+					procNetstat.IPReversePathFilter = &value
+				case "TCPTimeWaitOverflow":
+					procNetstat.TCPTimeWaitOverflow = &value
+				case "TCPReqQFullDoCookies":
+					procNetstat.TCPReqQFullDoCookies = &value
+				case "TCPReqQFullDrop":
+					procNetstat.TCPReqQFullDrop = &value
+				case "TCPRetransFail":
+					procNetstat.TCPRetransFail = &value
+				case "TCPRcvCoalesce":
+					procNetstat.TCPRcvCoalesce = &value
+				case "TCPRcvQDrop":
+					procNetstat.TCPRcvQDrop = &value
+				case "TCPOFOQueue":
+					procNetstat.TCPOFOQueue = &value
+				case "TCPOFODrop":
+					procNetstat.TCPOFODrop = &value
+				case "TCPOFOMerge":
+					procNetstat.TCPOFOMerge = &value
+				case "TCPChallengeACK":
+					procNetstat.TCPChallengeACK = &value
+				case "TCPSYNChallenge":
+					procNetstat.TCPSYNChallenge = &value
+				case "TCPFastOpenActive":
+					procNetstat.TCPFastOpenActive = &value
+				case "TCPFastOpenActiveFail":
+					procNetstat.TCPFastOpenActiveFail = &value
+				case "TCPFastOpenPassive":
+					procNetstat.TCPFastOpenPassive = &value
+				case "TCPFastOpenPassiveFail":
+					procNetstat.TCPFastOpenPassiveFail = &value
+				case "TCPFastOpenListenOverflow":
+					procNetstat.TCPFastOpenListenOverflow = &value
+				case "TCPFastOpenCookieReqd":
+					procNetstat.TCPFastOpenCookieReqd = &value
+				case "TCPFastOpenBlackhole":
+					procNetstat.TCPFastOpenBlackhole = &value
+				case "TCPSpuriousRtxHostQueues":
+					procNetstat.TCPSpuriousRtxHostQueues = &value
+				case "BusyPollRxPackets":
+					procNetstat.BusyPollRxPackets = &value
+				case "TCPAutoCorking":
+					procNetstat.TCPAutoCorking = &value
+				case "TCPFromZeroWindowAdv":
+					procNetstat.TCPFromZeroWindowAdv = &value
+				case "TCPToZeroWindowAdv":
+					procNetstat.TCPToZeroWindowAdv = &value
+				case "TCPWantZeroWindowAdv":
+					procNetstat.TCPWantZeroWindowAdv = &value
+				case "TCPSynRetrans":
+					procNetstat.TCPSynRetrans = &value
+				case "TCPOrigDataSent":
+					procNetstat.TCPOrigDataSent = &value
+				case "TCPHystartTrainDetect":
+					procNetstat.TCPHystartTrainDetect = &value
+				case "TCPHystartTrainCwnd":
+					procNetstat.TCPHystartTrainCwnd = &value
+				case "TCPHystartDelayDetect":
+					procNetstat.TCPHystartDelayDetect = &value
+				case "TCPHystartDelayCwnd":
+					procNetstat.TCPHystartDelayCwnd = &value
+				case "TCPACKSkippedSynRecv":
+					procNetstat.TCPACKSkippedSynRecv = &value
+				case "TCPACKSkippedPAWS":
+					procNetstat.TCPACKSkippedPAWS = &value
+				case "TCPACKSkippedSeq":
+					procNetstat.TCPACKSkippedSeq = &value
+				case "TCPACKSkippedFinWait2":
+					procNetstat.TCPACKSkippedFinWait2 = &value
+				case "TCPACKSkippedTimeWait":
+					procNetstat.TCPACKSkippedTimeWait = &value
+				case "TCPACKSkippedChallenge":
+					procNetstat.TCPACKSkippedChallenge = &value
+				case "TCPWinProbe":
+					procNetstat.TCPWinProbe = &value
+				case "TCPKeepAlive":
+					procNetstat.TCPKeepAlive = &value
+				case "TCPMTUPFail":
+					procNetstat.TCPMTUPFail = &value
+				case "TCPMTUPSuccess":
+					procNetstat.TCPMTUPSuccess = &value
+				case "TCPWqueueTooBig":
+					procNetstat.TCPWqueueTooBig = &value
+				}
+			case "IpExt":
+				switch key {
+				case "InNoRoutes":
+					procNetstat.InNoRoutes = &value
+				case "InTruncatedPkts":
+					procNetstat.InTruncatedPkts = &value
+				case "InMcastPkts":
+					procNetstat.InMcastPkts = &value
+				case "OutMcastPkts":
+					procNetstat.OutMcastPkts = &value
+				case "InBcastPkts":
+					procNetstat.InBcastPkts = &value
+				case "OutBcastPkts":
+					procNetstat.OutBcastPkts = &value
+				case "InOctets":
+					procNetstat.InOctets = &value
+				case "OutOctets":
+					procNetstat.OutOctets = &value
+				case "InMcastOctets":
+					procNetstat.InMcastOctets = &value
+				case "OutMcastOctets":
+					procNetstat.OutMcastOctets = &value
+				case "InBcastOctets":
+					procNetstat.InBcastOctets = &value
+				case "OutBcastOctets":
+					procNetstat.OutBcastOctets = &value
+				case "InCsumErrors":
+					procNetstat.InCsumErrors = &value
+				case "InNoECTPkts":
+					procNetstat.InNoECTPkts = &value
+				case "InECT1Pkts":
+					procNetstat.InECT1Pkts = &value
+				case "InECT0Pkts":
+					procNetstat.InECT0Pkts = &value
+				case "InCEPkts":
+					procNetstat.InCEPkts = &value
+				case "ReasmOverlaps":
+					procNetstat.ReasmOverlaps = &value
+				}
+			}
+		}
+	}
+	return procNetstat, scanner.Err()
+}
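+
+// Illustrative usage (a sketch of assumed caller code, with error handling elided),
+// given a Proc p. Every counter is a pointer and stays nil when the kernel does not
+// report that field, so check before dereferencing:
+//
+//	stats, _ := p.Netstat()
+//	if stats.TCPSynRetrans != nil {
+//		fmt.Println("TCPSynRetrans:", *stats.TCPSynRetrans)
+//	}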
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 0000000..0f8f847
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+	Type  string // Namespace type.
+	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
+// process is a member.
+func (p Proc) Namespaces() (Namespaces, error) {
+	d, err := os.Open(p.path("ns"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
+	}
+
+	ns := make(Namespaces, len(names))
+	for _, name := range names {
+		target, err := os.Readlink(p.path("ns", name))
+		if err != nil {
+			return nil, err
+		}
+
+		fields := strings.SplitN(target, ":", 2)
+		if len(fields) != 2 {
+			return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
+		}
+
+		typ := fields[0]
+		inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
+		}
+
+		ns[name] = Namespace{typ, uint32(inode)}
+	}
+
+	return ns, nil
+}
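+
+// Illustrative usage (a sketch of assumed caller code, with error handling elided),
+// given a Proc p:
+//
+//	nss, _ := p.Namespaces()
+//	for name, ns := range nss {
+//		fmt.Printf("%s: type=%s inode=%d\n", name, ns.Type, ns.Inode)
+//	}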
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
new file mode 100644
index 0000000..ccd35f1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -0,0 +1,102 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// The PSI / pressure interface is described at
+//   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
+// Each resource (cpu, io, memory, ...) is exposed as a single file.
+// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
+// Each line contains several averages (over n seconds) and a total in µs.
+//
+// Example io pressure file:
+// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
+// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
+
+// PSILine is a single line of values as returned by `/proc/pressure/*`.
+//
+// The Avg entries are averages over n seconds, as a percentage.
+// The Total line is in microseconds.
+type PSILine struct {
+	Avg10  float64
+	Avg60  float64
+	Avg300 float64
+	Total  uint64
+}
+
+// PSIStats represent pressure stall information from /proc/pressure/*
+//
+// "Some" indicates the share of time in which at least some tasks are stalled.
+// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
+type PSIStats struct {
+	Some *PSILine
+	Full *PSILine
+}
+
+// PSIStatsForResource reads pressure stall information for the specified
+// resource from /proc/pressure/<resource>. At time of writing this can be
+// either "cpu", "memory" or "io".
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
+	if err != nil {
+		return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
+	}
+
+	return parsePSIStats(bytes.NewReader(data))
+}
+
+// parsePSIStats parses the specified file for pressure stall information.
+func parsePSIStats(r io.Reader) (PSIStats, error) {
+	psiStats := PSIStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		l := scanner.Text()
+		prefix := strings.Split(l, " ")[0]
+		switch prefix {
+		case "some":
+			psi := PSILine{}
+			_, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+			if err != nil {
+				return PSIStats{}, err
+			}
+			psiStats.Some = &psi
+		case "full":
+			psi := PSILine{}
+			_, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+			if err != nil {
+				return PSIStats{}, err
+			}
+			psiStats.Full = &psi
+		default:
+			// If we encounter a line with an unknown prefix, ignore it and move on
+			// Should new measurement types be added in the future we'll simply ignore them instead
+			// of erroring on retrieval
+			continue
+		}
+	}
+
+	return psiStats, nil
+}
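+
+// Illustrative usage (a sketch of assumed caller code, with error handling elided).
+// Some and Full are nil when the corresponding line is absent for a resource:
+//
+//	fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//	stats, _ := fs.PSIStatsForResource("io")
+//	if stats.Some != nil {
+//		fmt.Println("io some avg10:", stats.Some.Avg10)
+//	}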
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
new file mode 100644
index 0000000..9a297af
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -0,0 +1,164 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+var (
+	// Match the header line before each mapped zone in `/proc/pid/smaps`.
+	procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
+)
+
+type ProcSMapsRollup struct {
+	// Amount of the mapping that is currently resident in RAM.
+	Rss uint64
+	// Process's proportional share of this mapping.
+	Pss uint64
+	// Size in bytes of clean shared pages.
+	SharedClean uint64
+	// Size in bytes of dirty shared pages.
+	SharedDirty uint64
+	// Size in bytes of clean private pages.
+	PrivateClean uint64
+	// Size in bytes of dirty private pages.
+	PrivateDirty uint64
+	// Amount of memory currently marked as referenced or accessed.
+	Referenced uint64
+	// Amount of memory that does not belong to any file.
+	Anonymous uint64
+	// Amount of would-be-anonymous memory currently on swap.
+	Swap uint64
+	// Process's proportional memory on swap.
+	SwapPss uint64
+}
+
+// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
+// process.
+//
+// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/pid/smaps will
+// be read and summed.
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
+	data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
+	if err != nil && os.IsNotExist(err) {
+		return p.procSMapsRollupManual()
+	}
+	if err != nil {
+		return ProcSMapsRollup{}, err
+	}
+
+	lines := strings.Split(string(data), "\n")
+	smaps := ProcSMapsRollup{}
+
+	// skip the first line, which doesn't contain the information we need
+	lines = lines[1:]
+	for _, line := range lines {
+		if line == "" {
+			continue
+		}
+
+		if err := smaps.parseLine(line); err != nil {
+			return ProcSMapsRollup{}, err
+		}
+	}
+
+	return smaps, nil
+}
+
+// Read /proc/pid/smaps and do the roll-up in Go code.
+func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
+	file, err := os.Open(p.path("smaps"))
+	if err != nil {
+		return ProcSMapsRollup{}, err
+	}
+	defer file.Close()
+
+	smaps := ProcSMapsRollup{}
+	scan := bufio.NewScanner(file)
+
+	for scan.Scan() {
+		line := scan.Text()
+
+		if procSMapsHeaderLine.MatchString(line) {
+			continue
+		}
+
+		if err := smaps.parseLine(line); err != nil {
+			return ProcSMapsRollup{}, err
+		}
+	}
+
+	return smaps, nil
+}
+
+func (s *ProcSMapsRollup) parseLine(line string) error {
+	kv := strings.SplitN(line, ":", 2)
+	if len(kv) != 2 {
+		return errors.New("invalid smaps line, missing colon")
+	}
+
+	k := kv[0]
+	if k == "VmFlags" {
+		return nil
+	}
+
+	v := strings.TrimSpace(kv[1])
+	v = strings.TrimSuffix(v, " kB")
+
+	vKBytes, err := strconv.ParseUint(v, 10, 64)
+	if err != nil {
+		return err
+	}
+	vBytes := vKBytes * 1024
+
+	s.addValue(k, vBytes)
+
+	return nil
+}
+
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
+	switch k {
+	case "Rss":
+		s.Rss += vUintBytes
+	case "Pss":
+		s.Pss += vUintBytes
+	case "Shared_Clean":
+		s.SharedClean += vUintBytes
+	case "Shared_Dirty":
+		s.SharedDirty += vUintBytes
+	case "Private_Clean":
+		s.PrivateClean += vUintBytes
+	case "Private_Dirty":
+		s.PrivateDirty += vUintBytes
+	case "Referenced":
+		s.Referenced += vUintBytes
+	case "Anonymous":
+		s.Anonymous += vUintBytes
+	case "Swap":
+		s.Swap += vUintBytes
+	case "SwapPss":
+		s.SwapPss += vUintBytes
+	}
+}
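A short usage sketch for the rollup reader above, assuming the vendored import path github.com/prometheus/procfs and the Self helper from proc.go.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // the current process
	if err != nil {
		log.Fatal(err)
	}
	// Falls back to summing /proc/self/smaps on kernels without smaps_rollup (< 4.15).
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rss=%d pss=%d swap=%d (bytes)\n", rollup.Rss, rollup.Pss, rollup.Swap)
}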
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
new file mode 100644
index 0000000..4bdc90b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -0,0 +1,353 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp models the content of /proc/<pid>/net/snmp.
+type ProcSnmp struct {
+	// The process ID.
+	PID int
+	Ip
+	Icmp
+	IcmpMsg
+	Tcp
+	Udp
+	UdpLite
+}
+
+type Ip struct { // nolint:revive
+	Forwarding      *float64
+	DefaultTTL      *float64
+	InReceives      *float64
+	InHdrErrors     *float64
+	InAddrErrors    *float64
+	ForwDatagrams   *float64
+	InUnknownProtos *float64
+	InDiscards      *float64
+	InDelivers      *float64
+	OutRequests     *float64
+	OutDiscards     *float64
+	OutNoRoutes     *float64
+	ReasmTimeout    *float64
+	ReasmReqds      *float64
+	ReasmOKs        *float64
+	ReasmFails      *float64
+	FragOKs         *float64
+	FragFails       *float64
+	FragCreates     *float64
+}
+
+type Icmp struct { // nolint:revive
+	InMsgs           *float64
+	InErrors         *float64
+	InCsumErrors     *float64
+	InDestUnreachs   *float64
+	InTimeExcds      *float64
+	InParmProbs      *float64
+	InSrcQuenchs     *float64
+	InRedirects      *float64
+	InEchos          *float64
+	InEchoReps       *float64
+	InTimestamps     *float64
+	InTimestampReps  *float64
+	InAddrMasks      *float64
+	InAddrMaskReps   *float64
+	OutMsgs          *float64
+	OutErrors        *float64
+	OutDestUnreachs  *float64
+	OutTimeExcds     *float64
+	OutParmProbs     *float64
+	OutSrcQuenchs    *float64
+	OutRedirects     *float64
+	OutEchos         *float64
+	OutEchoReps      *float64
+	OutTimestamps    *float64
+	OutTimestampReps *float64
+	OutAddrMasks     *float64
+	OutAddrMaskReps  *float64
+}
+
+type IcmpMsg struct {
+	InType3  *float64
+	OutType3 *float64
+}
+
+type Tcp struct { // nolint:revive
+	RtoAlgorithm *float64
+	RtoMin       *float64
+	RtoMax       *float64
+	MaxConn      *float64
+	ActiveOpens  *float64
+	PassiveOpens *float64
+	AttemptFails *float64
+	EstabResets  *float64
+	CurrEstab    *float64
+	InSegs       *float64
+	OutSegs      *float64
+	RetransSegs  *float64
+	InErrs       *float64
+	OutRsts      *float64
+	InCsumErrors *float64
+}
+
+type Udp struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
+type UdpLite struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
+func (p Proc) Snmp() (ProcSnmp, error) {
+	filename := p.path("net/snmp")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		return ProcSnmp{PID: p.PID}, err
+	}
+	procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
+	procSnmp.PID = p.PID
+	return procSnmp, err
+}
+
+// parseSnmp parses the metrics from the proc/<pid>/net/snmp file
+// and returns a ProcSnmp containing those metrics (e.g. {"Ip": {"Forwarding": 2}}).
+func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
+	var (
+		scanner  = bufio.NewScanner(r)
+		procSnmp = ProcSnmp{}
+	)
+
+	for scanner.Scan() {
+		nameParts := strings.Split(scanner.Text(), " ")
+		scanner.Scan()
+		valueParts := strings.Split(scanner.Text(), " ")
+		// Remove trailing :.
+		protocol := strings.TrimSuffix(nameParts[0], ":")
+		if len(nameParts) != len(valueParts) {
+			return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s",
+				ErrFileParse, fileName, protocol)
+		}
+		for i := 1; i < len(nameParts); i++ {
+			value, err := strconv.ParseFloat(valueParts[i], 64)
+			if err != nil {
+				return procSnmp, err
+			}
+			key := nameParts[i]
+
+			switch protocol {
+			case "Ip":
+				switch key {
+				case "Forwarding":
+					procSnmp.Forwarding = &value
+				case "DefaultTTL":
+					procSnmp.DefaultTTL = &value
+				case "InReceives":
+					procSnmp.InReceives = &value
+				case "InHdrErrors":
+					procSnmp.InHdrErrors = &value
+				case "InAddrErrors":
+					procSnmp.InAddrErrors = &value
+				case "ForwDatagrams":
+					procSnmp.ForwDatagrams = &value
+				case "InUnknownProtos":
+					procSnmp.InUnknownProtos = &value
+				case "InDiscards":
+					procSnmp.InDiscards = &value
+				case "InDelivers":
+					procSnmp.InDelivers = &value
+				case "OutRequests":
+					procSnmp.OutRequests = &value
+				case "OutDiscards":
+					procSnmp.OutDiscards = &value
+				case "OutNoRoutes":
+					procSnmp.OutNoRoutes = &value
+				case "ReasmTimeout":
+					procSnmp.ReasmTimeout = &value
+				case "ReasmReqds":
+					procSnmp.ReasmReqds = &value
+				case "ReasmOKs":
+					procSnmp.ReasmOKs = &value
+				case "ReasmFails":
+					procSnmp.ReasmFails = &value
+				case "FragOKs":
+					procSnmp.FragOKs = &value
+				case "FragFails":
+					procSnmp.FragFails = &value
+				case "FragCreates":
+					procSnmp.FragCreates = &value
+				}
+			case "Icmp":
+				switch key {
+				case "InMsgs":
+					procSnmp.InMsgs = &value
+				case "InErrors":
+					procSnmp.Icmp.InErrors = &value
+				case "InCsumErrors":
+					procSnmp.Icmp.InCsumErrors = &value
+				case "InDestUnreachs":
+					procSnmp.InDestUnreachs = &value
+				case "InTimeExcds":
+					procSnmp.InTimeExcds = &value
+				case "InParmProbs":
+					procSnmp.InParmProbs = &value
+				case "InSrcQuenchs":
+					procSnmp.InSrcQuenchs = &value
+				case "InRedirects":
+					procSnmp.InRedirects = &value
+				case "InEchos":
+					procSnmp.InEchos = &value
+				case "InEchoReps":
+					procSnmp.InEchoReps = &value
+				case "InTimestamps":
+					procSnmp.InTimestamps = &value
+				case "InTimestampReps":
+					procSnmp.InTimestampReps = &value
+				case "InAddrMasks":
+					procSnmp.InAddrMasks = &value
+				case "InAddrMaskReps":
+					procSnmp.InAddrMaskReps = &value
+				case "OutMsgs":
+					procSnmp.OutMsgs = &value
+				case "OutErrors":
+					procSnmp.OutErrors = &value
+				case "OutDestUnreachs":
+					procSnmp.OutDestUnreachs = &value
+				case "OutTimeExcds":
+					procSnmp.OutTimeExcds = &value
+				case "OutParmProbs":
+					procSnmp.OutParmProbs = &value
+				case "OutSrcQuenchs":
+					procSnmp.OutSrcQuenchs = &value
+				case "OutRedirects":
+					procSnmp.OutRedirects = &value
+				case "OutEchos":
+					procSnmp.OutEchos = &value
+				case "OutEchoReps":
+					procSnmp.OutEchoReps = &value
+				case "OutTimestamps":
+					procSnmp.OutTimestamps = &value
+				case "OutTimestampReps":
+					procSnmp.OutTimestampReps = &value
+				case "OutAddrMasks":
+					procSnmp.OutAddrMasks = &value
+				case "OutAddrMaskReps":
+					procSnmp.OutAddrMaskReps = &value
+				}
+			case "IcmpMsg":
+				switch key {
+				case "InType3":
+					procSnmp.InType3 = &value
+				case "OutType3":
+					procSnmp.OutType3 = &value
+				}
+			case "Tcp":
+				switch key {
+				case "RtoAlgorithm":
+					procSnmp.RtoAlgorithm = &value
+				case "RtoMin":
+					procSnmp.RtoMin = &value
+				case "RtoMax":
+					procSnmp.RtoMax = &value
+				case "MaxConn":
+					procSnmp.MaxConn = &value
+				case "ActiveOpens":
+					procSnmp.ActiveOpens = &value
+				case "PassiveOpens":
+					procSnmp.PassiveOpens = &value
+				case "AttemptFails":
+					procSnmp.AttemptFails = &value
+				case "EstabResets":
+					procSnmp.EstabResets = &value
+				case "CurrEstab":
+					procSnmp.CurrEstab = &value
+				case "InSegs":
+					procSnmp.InSegs = &value
+				case "OutSegs":
+					procSnmp.OutSegs = &value
+				case "RetransSegs":
+					procSnmp.RetransSegs = &value
+				case "InErrs":
+					procSnmp.InErrs = &value
+				case "OutRsts":
+					procSnmp.OutRsts = &value
+				case "InCsumErrors":
+					procSnmp.Tcp.InCsumErrors = &value
+				}
+			case "Udp":
+				switch key {
+				case "InDatagrams":
+					procSnmp.Udp.InDatagrams = &value
+				case "NoPorts":
+					procSnmp.Udp.NoPorts = &value
+				case "InErrors":
+					procSnmp.Udp.InErrors = &value
+				case "OutDatagrams":
+					procSnmp.Udp.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp.Udp.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp.Udp.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp.Udp.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp.Udp.IgnoredMulti = &value
+				}
+			case "UdpLite":
+				switch key {
+				case "InDatagrams":
+					procSnmp.UdpLite.InDatagrams = &value
+				case "NoPorts":
+					procSnmp.UdpLite.NoPorts = &value
+				case "InErrors":
+					procSnmp.UdpLite.InErrors = &value
+				case "OutDatagrams":
+					procSnmp.UdpLite.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp.UdpLite.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp.UdpLite.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp.UdpLite.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp.UdpLite.IgnoredMulti = &value
+				}
+			}
+		}
+	}
+	return procSnmp, scanner.Err()
+}
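Every counter in ProcSnmp is a *float64 so that counters absent from the kernel's output stay nil; callers should nil-check before dereferencing. A hedged fragment, assuming fmt/log imports and a procfs.Proc p (e.g. from procfs.Self()):

snmp, err := p.Snmp()
if err != nil {
	log.Fatal(err)
}
if snmp.Ip.InReceives != nil {
	fmt.Printf("IP InReceives: %.0f\n", *snmp.Ip.InReceives)
}
if snmp.Tcp.CurrEstab != nil {
	fmt.Printf("TCP CurrEstab: %.0f\n", *snmp.Tcp.CurrEstab)
}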
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
new file mode 100644
index 0000000..fb7fd39
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -0,0 +1,381 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
+type ProcSnmp6 struct {
+	// The process ID.
+	PID int
+	Ip6
+	Icmp6
+	Udp6
+	UdpLite6
+}
+
+type Ip6 struct { // nolint:revive
+	InReceives       *float64
+	InHdrErrors      *float64
+	InTooBigErrors   *float64
+	InNoRoutes       *float64
+	InAddrErrors     *float64
+	InUnknownProtos  *float64
+	InTruncatedPkts  *float64
+	InDiscards       *float64
+	InDelivers       *float64
+	OutForwDatagrams *float64
+	OutRequests      *float64
+	OutDiscards      *float64
+	OutNoRoutes      *float64
+	ReasmTimeout     *float64
+	ReasmReqds       *float64
+	ReasmOKs         *float64
+	ReasmFails       *float64
+	FragOKs          *float64
+	FragFails        *float64
+	FragCreates      *float64
+	InMcastPkts      *float64
+	OutMcastPkts     *float64
+	InOctets         *float64
+	OutOctets        *float64
+	InMcastOctets    *float64
+	OutMcastOctets   *float64
+	InBcastOctets    *float64
+	OutBcastOctets   *float64
+	InNoECTPkts      *float64
+	InECT1Pkts       *float64
+	InECT0Pkts       *float64
+	InCEPkts         *float64
+}
+
+type Icmp6 struct {
+	InMsgs                    *float64
+	InErrors                  *float64
+	OutMsgs                   *float64
+	OutErrors                 *float64
+	InCsumErrors              *float64
+	InDestUnreachs            *float64
+	InPktTooBigs              *float64
+	InTimeExcds               *float64
+	InParmProblems            *float64
+	InEchos                   *float64
+	InEchoReplies             *float64
+	InGroupMembQueries        *float64
+	InGroupMembResponses      *float64
+	InGroupMembReductions     *float64
+	InRouterSolicits          *float64
+	InRouterAdvertisements    *float64
+	InNeighborSolicits        *float64
+	InNeighborAdvertisements  *float64
+	InRedirects               *float64
+	InMLDv2Reports            *float64
+	OutDestUnreachs           *float64
+	OutPktTooBigs             *float64
+	OutTimeExcds              *float64
+	OutParmProblems           *float64
+	OutEchos                  *float64
+	OutEchoReplies            *float64
+	OutGroupMembQueries       *float64
+	OutGroupMembResponses     *float64
+	OutGroupMembReductions    *float64
+	OutRouterSolicits         *float64
+	OutRouterAdvertisements   *float64
+	OutNeighborSolicits       *float64
+	OutNeighborAdvertisements *float64
+	OutRedirects              *float64
+	OutMLDv2Reports           *float64
+	InType1                   *float64
+	InType134                 *float64
+	InType135                 *float64
+	InType136                 *float64
+	InType143                 *float64
+	OutType133                *float64
+	OutType135                *float64
+	OutType136                *float64
+	OutType143                *float64
+}
+
+type Udp6 struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
+type UdpLite6 struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+}
+
+func (p Proc) Snmp6() (ProcSnmp6, error) {
+	filename := p.path("net/snmp6")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		// On systems with IPv6 disabled, this file won't exist.
+		// Do nothing.
+		if errors.Is(err, os.ErrNotExist) {
+			return ProcSnmp6{PID: p.PID}, nil
+		}
+
+		return ProcSnmp6{PID: p.PID}, err
+	}
+
+	procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
+	procSnmp6.PID = p.PID
+	return procSnmp6, err
+}
+
+// parseSNMP6Stats parses the metrics from the proc/<pid>/net/snmp6 file
+// and returns a ProcSnmp6 containing those metrics.
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
+	var (
+		scanner   = bufio.NewScanner(r)
+		procSnmp6 = ProcSnmp6{}
+	)
+
+	for scanner.Scan() {
+		stat := strings.Fields(scanner.Text())
+		if len(stat) < 2 {
+			continue
+		}
+		// Expect to have "6" in metric name, skip line otherwise
+		if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
+			protocol := stat[0][:sixIndex+1]
+			key := stat[0][sixIndex+1:]
+			value, err := strconv.ParseFloat(stat[1], 64)
+			if err != nil {
+				return procSnmp6, err
+			}
+
+			switch protocol {
+			case "Ip6":
+				switch key {
+				case "InReceives":
+					procSnmp6.InReceives = &value
+				case "InHdrErrors":
+					procSnmp6.InHdrErrors = &value
+				case "InTooBigErrors":
+					procSnmp6.InTooBigErrors = &value
+				case "InNoRoutes":
+					procSnmp6.InNoRoutes = &value
+				case "InAddrErrors":
+					procSnmp6.InAddrErrors = &value
+				case "InUnknownProtos":
+					procSnmp6.InUnknownProtos = &value
+				case "InTruncatedPkts":
+					procSnmp6.InTruncatedPkts = &value
+				case "InDiscards":
+					procSnmp6.InDiscards = &value
+				case "InDelivers":
+					procSnmp6.InDelivers = &value
+				case "OutForwDatagrams":
+					procSnmp6.OutForwDatagrams = &value
+				case "OutRequests":
+					procSnmp6.OutRequests = &value
+				case "OutDiscards":
+					procSnmp6.OutDiscards = &value
+				case "OutNoRoutes":
+					procSnmp6.OutNoRoutes = &value
+				case "ReasmTimeout":
+					procSnmp6.ReasmTimeout = &value
+				case "ReasmReqds":
+					procSnmp6.ReasmReqds = &value
+				case "ReasmOKs":
+					procSnmp6.ReasmOKs = &value
+				case "ReasmFails":
+					procSnmp6.ReasmFails = &value
+				case "FragOKs":
+					procSnmp6.FragOKs = &value
+				case "FragFails":
+					procSnmp6.FragFails = &value
+				case "FragCreates":
+					procSnmp6.FragCreates = &value
+				case "InMcastPkts":
+					procSnmp6.InMcastPkts = &value
+				case "OutMcastPkts":
+					procSnmp6.OutMcastPkts = &value
+				case "InOctets":
+					procSnmp6.InOctets = &value
+				case "OutOctets":
+					procSnmp6.OutOctets = &value
+				case "InMcastOctets":
+					procSnmp6.InMcastOctets = &value
+				case "OutMcastOctets":
+					procSnmp6.OutMcastOctets = &value
+				case "InBcastOctets":
+					procSnmp6.InBcastOctets = &value
+				case "OutBcastOctets":
+					procSnmp6.OutBcastOctets = &value
+				case "InNoECTPkts":
+					procSnmp6.InNoECTPkts = &value
+				case "InECT1Pkts":
+					procSnmp6.InECT1Pkts = &value
+				case "InECT0Pkts":
+					procSnmp6.InECT0Pkts = &value
+				case "InCEPkts":
+					procSnmp6.InCEPkts = &value
+
+				}
+			case "Icmp6":
+				switch key {
+				case "InMsgs":
+					procSnmp6.InMsgs = &value
+				case "InErrors":
+					procSnmp6.Icmp6.InErrors = &value
+				case "OutMsgs":
+					procSnmp6.OutMsgs = &value
+				case "OutErrors":
+					procSnmp6.OutErrors = &value
+				case "InCsumErrors":
+					procSnmp6.Icmp6.InCsumErrors = &value
+				case "InDestUnreachs":
+					procSnmp6.InDestUnreachs = &value
+				case "InPktTooBigs":
+					procSnmp6.InPktTooBigs = &value
+				case "InTimeExcds":
+					procSnmp6.InTimeExcds = &value
+				case "InParmProblems":
+					procSnmp6.InParmProblems = &value
+				case "InEchos":
+					procSnmp6.InEchos = &value
+				case "InEchoReplies":
+					procSnmp6.InEchoReplies = &value
+				case "InGroupMembQueries":
+					procSnmp6.InGroupMembQueries = &value
+				case "InGroupMembResponses":
+					procSnmp6.InGroupMembResponses = &value
+				case "InGroupMembReductions":
+					procSnmp6.InGroupMembReductions = &value
+				case "InRouterSolicits":
+					procSnmp6.InRouterSolicits = &value
+				case "InRouterAdvertisements":
+					procSnmp6.InRouterAdvertisements = &value
+				case "InNeighborSolicits":
+					procSnmp6.InNeighborSolicits = &value
+				case "InNeighborAdvertisements":
+					procSnmp6.InNeighborAdvertisements = &value
+				case "InRedirects":
+					procSnmp6.InRedirects = &value
+				case "InMLDv2Reports":
+					procSnmp6.InMLDv2Reports = &value
+				case "OutDestUnreachs":
+					procSnmp6.OutDestUnreachs = &value
+				case "OutPktTooBigs":
+					procSnmp6.OutPktTooBigs = &value
+				case "OutTimeExcds":
+					procSnmp6.OutTimeExcds = &value
+				case "OutParmProblems":
+					procSnmp6.OutParmProblems = &value
+				case "OutEchos":
+					procSnmp6.OutEchos = &value
+				case "OutEchoReplies":
+					procSnmp6.OutEchoReplies = &value
+				case "OutGroupMembQueries":
+					procSnmp6.OutGroupMembQueries = &value
+				case "OutGroupMembResponses":
+					procSnmp6.OutGroupMembResponses = &value
+				case "OutGroupMembReductions":
+					procSnmp6.OutGroupMembReductions = &value
+				case "OutRouterSolicits":
+					procSnmp6.OutRouterSolicits = &value
+				case "OutRouterAdvertisements":
+					procSnmp6.OutRouterAdvertisements = &value
+				case "OutNeighborSolicits":
+					procSnmp6.OutNeighborSolicits = &value
+				case "OutNeighborAdvertisements":
+					procSnmp6.OutNeighborAdvertisements = &value
+				case "OutRedirects":
+					procSnmp6.OutRedirects = &value
+				case "OutMLDv2Reports":
+					procSnmp6.OutMLDv2Reports = &value
+				case "InType1":
+					procSnmp6.InType1 = &value
+				case "InType134":
+					procSnmp6.InType134 = &value
+				case "InType135":
+					procSnmp6.InType135 = &value
+				case "InType136":
+					procSnmp6.InType136 = &value
+				case "InType143":
+					procSnmp6.InType143 = &value
+				case "OutType133":
+					procSnmp6.OutType133 = &value
+				case "OutType135":
+					procSnmp6.OutType135 = &value
+				case "OutType136":
+					procSnmp6.OutType136 = &value
+				case "OutType143":
+					procSnmp6.OutType143 = &value
+				}
+			case "Udp6":
+				switch key {
+				case "InDatagrams":
+					procSnmp6.Udp6.InDatagrams = &value
+				case "NoPorts":
+					procSnmp6.Udp6.NoPorts = &value
+				case "InErrors":
+					procSnmp6.Udp6.InErrors = &value
+				case "OutDatagrams":
+					procSnmp6.Udp6.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp6.Udp6.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp6.Udp6.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp6.Udp6.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp6.IgnoredMulti = &value
+				}
+			case "UdpLite6":
+				switch key {
+				case "InDatagrams":
+					procSnmp6.UdpLite6.InDatagrams = &value
+				case "NoPorts":
+					procSnmp6.UdpLite6.NoPorts = &value
+				case "InErrors":
+					procSnmp6.UdpLite6.InErrors = &value
+				case "OutDatagrams":
+					procSnmp6.UdpLite6.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp6.UdpLite6.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp6.UdpLite6.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp6.UdpLite6.InCsumErrors = &value
+				}
+			}
+		}
+	}
+	return procSnmp6, scanner.Err()
+}
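Snmp6 mirrors Snmp, except the protocol is taken from the metric-name prefix up to and including the "6", and the call returns an empty result rather than an error when IPv6 is disabled. A fragment under the same assumptions as the Snmp sketch above:

snmp6, err := p.Snmp6()
if err != nil {
	log.Fatal(err)
}
if snmp6.Ip6.InReceives != nil {
	fmt.Printf("IPv6 InReceives: %.0f\n", *snmp6.Ip6.InReceives)
}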
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..06a8d93
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,229 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.  After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+	// The process ID.
+	PID int
+	// The filename of the executable.
+	Comm string
+	// The process state.
+	State string
+	// The PID of the parent of this process.
+	PPID int
+	// The process group ID of the process.
+	PGRP int
+	// The session ID of the process.
+	Session int
+	// The controlling terminal of the process.
+	TTY int
+	// The ID of the foreground process group of the controlling terminal of
+	// the process.
+	TPGID int
+	// The kernel flags word of the process.
+	Flags uint
+	// The number of minor faults the process has made which have not required
+	// loading a memory page from disk.
+	MinFlt uint
+	// The number of minor faults that the process's waited-for children have
+	// made.
+	CMinFlt uint
+	// The number of major faults the process has made which have required
+	// loading a memory page from disk.
+	MajFlt uint
+	// The number of major faults that the process's waited-for children have
+	// made.
+	CMajFlt uint
+	// Amount of time that this process has been scheduled in user mode,
+	// measured in clock ticks.
+	UTime uint
+	// Amount of time that this process has been scheduled in kernel mode,
+	// measured in clock ticks.
+	STime uint
+	// Amount of time that this process's waited-for children have been
+	// scheduled in user mode, measured in clock ticks.
+	CUTime int
+	// Amount of time that this process's waited-for children have been
+	// scheduled in kernel mode, measured in clock ticks.
+	CSTime int
+	// For processes running a real-time scheduling policy, this is the negated
+	// scheduling priority, minus one.
+	Priority int
+	// The nice value, a value in the range 19 (low priority) to -20 (high
+	// priority).
+	Nice int
+	// Number of threads in this process.
+	NumThreads int
+	// The time the process started after system boot, the value is expressed
+	// in clock ticks.
+	Starttime uint64
+	// Virtual memory size in bytes.
+	VSize uint
+	// Resident set size in pages.
+	RSS int
+	// Soft limit in bytes on the rss of the process.
+	RSSLimit uint64
+	// CPU number last executed on.
+	Processor uint
+	// Real-time scheduling priority, a number in the range 1 to 99 for processes
+	// scheduled under a real-time policy, or 0, for non-real-time processes.
+	RTPriority uint
+	// Scheduling policy.
+	Policy uint
+	// Aggregated block I/O delays, measured in clock ticks (centiseconds).
+	DelayAcctBlkIOTicks uint64
+	// Guest time of the process (time spent running a virtual CPU for a guest
+	// operating system), measured in clock ticks.
+	GuestTime int
+	// Guest time of the process's children, measured in clock ticks.
+	CGuestTime int
+
+	proc FS
+}
+
+// NewStat returns the current status information of the process.
+//
+// Deprecated: Use p.Stat() instead.
+func (p Proc) NewStat() (ProcStat, error) {
+	return p.Stat()
+}
+
+// Stat returns the current status information of the process.
+func (p Proc) Stat() (ProcStat, error) {
+	data, err := util.ReadFileNoStat(p.path("stat"))
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	var (
+		ignoreInt64  int64
+		ignoreUint64 uint64
+
+		s = ProcStat{PID: p.PID, proc: p.fs}
+		l = bytes.Index(data, []byte("("))
+		r = bytes.LastIndex(data, []byte(")"))
+	)
+
+	if l < 0 || r < 0 {
+		return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
+	}
+
+	s.Comm = string(data[l+1 : r])
+
+	// Check the following resources for the details about the particular stat
+	// fields and their data types:
+	// * https://man7.org/linux/man-pages/man5/proc.5.html
+	// * https://man7.org/linux/man-pages/man3/scanf.3.html
+	_, err = fmt.Fscan(
+		bytes.NewBuffer(data[r+2:]),
+		&s.State,
+		&s.PPID,
+		&s.PGRP,
+		&s.Session,
+		&s.TTY,
+		&s.TPGID,
+		&s.Flags,
+		&s.MinFlt,
+		&s.CMinFlt,
+		&s.MajFlt,
+		&s.CMajFlt,
+		&s.UTime,
+		&s.STime,
+		&s.CUTime,
+		&s.CSTime,
+		&s.Priority,
+		&s.Nice,
+		&s.NumThreads,
+		&ignoreInt64,
+		&s.Starttime,
+		&s.VSize,
+		&s.RSS,
+		&s.RSSLimit,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreInt64,
+		&s.Processor,
+		&s.RTPriority,
+		&s.Policy,
+		&s.DelayAcctBlkIOTicks,
+		&s.GuestTime,
+		&s.CGuestTime,
+	)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() uint {
+	return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+	return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp (in seconds) at which the process started.
+func (s ProcStat) StartTime() (float64, error) {
+	stat, err := s.proc.Stat()
+	if err != nil {
+		return 0, err
+	}
+	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+	return float64(s.UTime+s.STime) / userHZ
+}
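A sketch of the clock-tick and page-size conversions described above: CPUTime divides UTime+STime by the hard-coded userHZ of 100, and ResidentMemory multiplies RSS pages by the system page size. The procfs.Self helper is assumed from proc.go.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	started, err := stat.StartTime() // boot time + Starttime/userHZ
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("comm=%s cpu=%.2fs rss=%d bytes start=%.0f\n",
		stat.Comm, stat.CPUTime(), stat.ResidentMemory(), started)
}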
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
new file mode 100644
index 0000000..dd8aa56
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -0,0 +1,242 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"math/bits"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcStatus provides status information about the process,
+// read from /proc/[pid]/status.
+type ProcStatus struct {
+	// The process ID.
+	PID int
+	// The process name.
+	Name string
+
+	// Thread group ID.
+	TGID int
+	// List of Pid namespaces.
+	NSpids []uint64
+
+	// Peak virtual memory size.
+	VmPeak uint64 // nolint:revive
+	// Virtual memory size.
+	VmSize uint64 // nolint:revive
+	// Locked memory size.
+	VmLck uint64 // nolint:revive
+	// Pinned memory size.
+	VmPin uint64 // nolint:revive
+	// Peak resident set size.
+	VmHWM uint64 // nolint:revive
+	// Resident set size (sum of RssAnon, RssFile and RssShmem).
+	VmRSS uint64 // nolint:revive
+	// Size of resident anonymous memory.
+	RssAnon uint64 // nolint:revive
+	// Size of resident file mappings.
+	RssFile uint64 // nolint:revive
+	// Size of resident shared memory.
+	RssShmem uint64 // nolint:revive
+	// Size of data segments.
+	VmData uint64 // nolint:revive
+	// Size of stack segments.
+	VmStk uint64 // nolint:revive
+	// Size of text segments.
+	VmExe uint64 // nolint:revive
+	// Shared library code size.
+	VmLib uint64 // nolint:revive
+	// Page table entries size.
+	VmPTE uint64 // nolint:revive
+	// Size of second-level page tables.
+	VmPMD uint64 // nolint:revive
+	// Swapped-out virtual memory size by anonymous private.
+	VmSwap uint64 // nolint:revive
+	// Size of hugetlb memory portions
+	HugetlbPages uint64
+
+	// Number of voluntary context switches.
+	VoluntaryCtxtSwitches uint64
+	// Number of involuntary context switches.
+	NonVoluntaryCtxtSwitches uint64
+
+	// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
+	UIDs [4]uint64
+	// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
+	GIDs [4]uint64
+
+	// CpusAllowedList: List of cpu cores the process is allowed to run on.
+	CpusAllowedList []uint64
+}
+
+// NewStatus returns the current status information of the process.
+func (p Proc) NewStatus() (ProcStatus, error) {
+	data, err := util.ReadFileNoStat(p.path("status"))
+	if err != nil {
+		return ProcStatus{}, err
+	}
+
+	s := ProcStatus{PID: p.PID}
+
+	lines := strings.Split(string(data), "\n")
+	for _, line := range lines {
+		if !bytes.Contains([]byte(line), []byte(":")) {
+			continue
+		}
+
+		kv := strings.SplitN(line, ":", 2)
+
+		// removes spaces
+		k := strings.TrimSpace(kv[0])
+		v := strings.TrimSpace(kv[1])
+		// removes "kB"
+		v = strings.TrimSuffix(v, " kB")
+
+		// value to int when possible
+		// We can skip the error check here because vKBytes is not used when the value is a string.
+		vKBytes, _ := strconv.ParseUint(v, 10, 64)
+		// convert kB to B
+		vBytes := vKBytes * 1024
+
+		err = s.fillStatus(k, v, vKBytes, vBytes)
+		if err != nil {
+			return ProcStatus{}, err
+		}
+	}
+
+	return s, nil
+}
+
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
+	switch k {
+	case "Tgid":
+		s.TGID = int(vUint)
+	case "Name":
+		s.Name = vString
+	case "Uid":
+		var err error
+		for i, v := range strings.Split(vString, "\t") {
+			s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+			if err != nil {
+				return err
+			}
+		}
+	case "Gid":
+		var err error
+		for i, v := range strings.Split(vString, "\t") {
+			s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+			if err != nil {
+				return err
+			}
+		}
+	case "NSpid":
+		nspids, err := calcNSPidsList(vString)
+		if err != nil {
+			return err
+		}
+		s.NSpids = nspids
+	case "VmPeak":
+		s.VmPeak = vUintBytes
+	case "VmSize":
+		s.VmSize = vUintBytes
+	case "VmLck":
+		s.VmLck = vUintBytes
+	case "VmPin":
+		s.VmPin = vUintBytes
+	case "VmHWM":
+		s.VmHWM = vUintBytes
+	case "VmRSS":
+		s.VmRSS = vUintBytes
+	case "RssAnon":
+		s.RssAnon = vUintBytes
+	case "RssFile":
+		s.RssFile = vUintBytes
+	case "RssShmem":
+		s.RssShmem = vUintBytes
+	case "VmData":
+		s.VmData = vUintBytes
+	case "VmStk":
+		s.VmStk = vUintBytes
+	case "VmExe":
+		s.VmExe = vUintBytes
+	case "VmLib":
+		s.VmLib = vUintBytes
+	case "VmPTE":
+		s.VmPTE = vUintBytes
+	case "VmPMD":
+		s.VmPMD = vUintBytes
+	case "VmSwap":
+		s.VmSwap = vUintBytes
+	case "HugetlbPages":
+		s.HugetlbPages = vUintBytes
+	case "voluntary_ctxt_switches":
+		s.VoluntaryCtxtSwitches = vUint
+	case "nonvoluntary_ctxt_switches":
+		s.NonVoluntaryCtxtSwitches = vUint
+	case "Cpus_allowed_list":
+		s.CpusAllowedList = calcCpusAllowedList(vString)
+	}
+
+	return nil
+}
+
+// TotalCtxtSwitches returns the total number of context switches.
+func (s ProcStatus) TotalCtxtSwitches() uint64 {
+	return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
+}
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+	s := strings.Split(cpuString, ",")
+
+	var g []uint64
+
+	for _, cpu := range s {
+		// parse cpu ranges, example: 1-3=[1,2,3]
+		if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+			startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+			endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+			for i := startCPU; i <= endCPU; i++ {
+				g = append(g, i)
+			}
+		} else if len(l) == 1 {
+			cpu, _ := strconv.ParseUint(l[0], 10, 64)
+			g = append(g, cpu)
+		}
+
+	}
+
+	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+	return g
+}
+
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+	s := strings.Split(nspidsString, "\t")
+	var nspids []uint64
+
+	for _, nspid := range s {
+		nspid, err := strconv.ParseUint(nspid, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		nspids = append(nspids, nspid)
+	}
+
+	return nspids, nil
+}
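A minimal fragment showing /proc/[pid]/status being read through the parser above; fmt/log imports and a procfs.Proc p are assumed.

status, err := p.NewStatus()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("name=%s vmRSS=%d bytes ctxt=%d cpus=%v\n",
	status.Name, status.VmRSS, status.TotalCtxtSwitches(), status.CpusAllowedList)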
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
new file mode 100644
index 0000000..3810d1a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+func sysctlToPath(sysctl string) string {
+	return strings.ReplaceAll(sysctl, ".", "/")
+}
+
+func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
+	value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
+	if err != nil {
+		return nil, err
+	}
+	return strings.Fields(value), nil
+
+}
+
+func (fs FS) SysctlInts(sysctl string) ([]int, error) {
+	fields, err := fs.SysctlStrings(sysctl)
+	if err != nil {
+		return nil, err
+	}
+
+	values := make([]int, len(fields))
+	for i, f := range fields {
+		vp := util.NewValueParser(f)
+		values[i] = vp.Int()
+		if err := vp.Err(); err != nil {
+			return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
+		}
+	}
+	return values, nil
+}
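sysctlToPath turns a dotted sysctl name into a path under /proc/sys, so "kernel.threads-max" is read from /proc/sys/kernel/threads-max. A hedged fragment, assuming fs came from procfs.NewFS("/proc"):

vals, err := fs.SysctlInts("kernel.threads-max")
if err != nil {
	log.Fatal(err)
}
fmt.Println("threads-max:", vals[0])

fields, err := fs.SysctlStrings("kernel.ostype")
if err != nil {
	log.Fatal(err)
}
fmt.Println("ostype:", fields)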
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
new file mode 100644
index 0000000..5f7f32d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/schedstat.go
@@ -0,0 +1,121 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+var (
+	cpuLineRE  = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
+	procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
+)
+
+// Schedstat contains scheduler statistics from /proc/schedstat
+//
+// See
+// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
+// for a detailed description of what these numbers mean.
+//
+// Note the current kernel documentation claims some of the time units are in
+// jiffies when they are actually in nanoseconds since 2.6.23 with the
+// introduction of CFS. A fix to the documentation is pending. See
+// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
+type Schedstat struct {
+	CPUs []*SchedstatCPU
+}
+
+// SchedstatCPU contains the values from one "cpu<N>" line.
+type SchedstatCPU struct {
+	CPUNum string
+
+	RunningNanoseconds uint64
+	WaitingNanoseconds uint64
+	RunTimeslices      uint64
+}
+
+// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
+type ProcSchedstat struct {
+	RunningNanoseconds uint64
+	WaitingNanoseconds uint64
+	RunTimeslices      uint64
+}
+
+// Schedstat reads data from `/proc/schedstat`.
+func (fs FS) Schedstat() (*Schedstat, error) {
+	file, err := os.Open(fs.proc.Path("schedstat"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	stats := &Schedstat{}
+	scanner := bufio.NewScanner(file)
+
+	for scanner.Scan() {
+		match := cpuLineRE.FindStringSubmatch(scanner.Text())
+		if match != nil {
+			cpu := &SchedstatCPU{}
+			cpu.CPUNum = match[1]
+
+			cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
+			if err != nil {
+				continue
+			}
+
+			cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
+			if err != nil {
+				continue
+			}
+
+			cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
+			if err != nil {
+				continue
+			}
+
+			stats.CPUs = append(stats.CPUs, cpu)
+		}
+	}
+
+	return stats, nil
+}
+
+func parseProcSchedstat(contents string) (ProcSchedstat, error) {
+	var (
+		stats ProcSchedstat
+		err   error
+	)
+	match := procLineRE.FindStringSubmatch(contents)
+
+	if match != nil {
+		stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
+		if err != nil {
+			return stats, err
+		}
+
+		stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
+		if err != nil {
+			return stats, err
+		}
+
+		stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
+		return stats, err
+	}
+
+	return stats, errors.New("could not parse schedstat")
+}
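Because the per-CPU run and wait figures are in nanoseconds (not jiffies, per the note above), converting to seconds is a plain division. A fragment assuming fs from procfs.NewFS("/proc"):

sched, err := fs.Schedstat()
if err != nil {
	log.Fatal(err)
}
for _, cpu := range sched.CPUs {
	fmt.Printf("cpu%s running=%.2fs waiting=%.2fs slices=%d\n",
		cpu.CPUNum,
		float64(cpu.RunningNanoseconds)/1e9,
		float64(cpu.WaitingNanoseconds)/1e9,
		cpu.RunTimeslices)
}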
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
new file mode 100644
index 0000000..8611c90
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -0,0 +1,151 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+var (
+	slabSpace  = regexp.MustCompile(`\s+`)
+	slabVer    = regexp.MustCompile(`slabinfo -`)
+	slabHeader = regexp.MustCompile(`# name`)
+)
+
+// Slab represents a slab pool in the kernel.
+type Slab struct {
+	Name         string
+	ObjActive    int64
+	ObjNum       int64
+	ObjSize      int64
+	ObjPerSlab   int64
+	PagesPerSlab int64
+	// tunables
+	Limit        int64
+	Batch        int64
+	SharedFactor int64
+	SlabActive   int64
+	SlabNum      int64
+	SharedAvail  int64
+}
+
+// SlabInfo represents info for all slabs.
+type SlabInfo struct {
+	Slabs []*Slab
+}
+
+func shouldParseSlab(line string) bool {
+	if slabVer.MatchString(line) {
+		return false
+	}
+	if slabHeader.MatchString(line) {
+		return false
+	}
+	return true
+}
+
+// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
+func parseV21SlabEntry(line string) (*Slab, error) {
+	// First cleanup whitespace.
+	l := slabSpace.ReplaceAllString(line, " ")
+	s := strings.Split(l, " ")
+	if len(s) != 16 {
+		return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
+	}
+	var err error
+	i := &Slab{Name: s[0]}
+	i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.Limit, err = strconv.ParseInt(s[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.Batch, err = strconv.ParseInt(s[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
+func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
+	scanner := bufio.NewScanner(r)
+	s := SlabInfo{Slabs: []*Slab{}}
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !shouldParseSlab(line) {
+			continue
+		}
+		slab, err := parseV21SlabEntry(line)
+		if err != nil {
+			return s, err
+		}
+		s.Slabs = append(s.Slabs, slab)
+	}
+	return s, nil
+}
+
+// SlabInfo reads data from `/proc/slabinfo`.
+func (fs FS) SlabInfo() (SlabInfo, error) {
+	// TODO: Consider passing options to allow for parsing different
+	// slabinfo versions. However, slabinfo 2.1 has been stable since
+	// kernel 2.6.10.
+	data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
+	if err != nil {
+		return SlabInfo{}, err
+	}
+
+	return parseSlabInfo21(bytes.NewReader(data))
+}
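/proc/slabinfo is usually readable only by root, so a permission error is the common failure mode. A fragment, again assuming fs from procfs.NewFS("/proc"):

info, err := fs.SlabInfo()
if err != nil {
	log.Fatal(err) // typically requires root
}
for _, s := range info.Slabs {
	if s.ObjActive > 0 {
		fmt.Printf("%s: %d/%d objects, %d bytes each\n", s.Name, s.ObjActive, s.ObjNum, s.ObjSize)
	}
}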
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
new file mode 100644
index 0000000..403e6ae
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Softirqs represents the softirq statistics.
+type Softirqs struct {
+	Hi      []uint64
+	Timer   []uint64
+	NetTx   []uint64
+	NetRx   []uint64
+	Block   []uint64
+	IRQPoll []uint64
+	Tasklet []uint64
+	Sched   []uint64
+	HRTimer []uint64
+	RCU     []uint64
+}
+
+func (fs FS) Softirqs() (Softirqs, error) {
+	fileName := fs.proc.Path("softirqs")
+	data, err := util.ReadFileNoStat(fileName)
+	if err != nil {
+		return Softirqs{}, err
+	}
+
+	reader := bytes.NewReader(data)
+
+	return parseSoftirqs(reader)
+}
+
+func parseSoftirqs(r io.Reader) (Softirqs, error) {
+	var (
+		softirqs = Softirqs{}
+		scanner  = bufio.NewScanner(r)
+	)
+
+	if !scanner.Scan() {
+		return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
+	}
+
+	for scanner.Scan() {
+		parts := strings.Fields(scanner.Text())
+		var err error
+
+		// require at least one cpu
+		if len(parts) < 2 {
+			continue
+		}
+		switch parts[0] {
+		case "HI:":
+			perCPU := parts[1:]
+			softirqs.Hi = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "TIMER:":
+			perCPU := parts[1:]
+			softirqs.Timer = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "NET_TX:":
+			perCPU := parts[1:]
+			softirqs.NetTx = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "NET_RX:":
+			perCPU := parts[1:]
+			softirqs.NetRx = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "BLOCK:":
+			perCPU := parts[1:]
+			softirqs.Block = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "IRQ_POLL:":
+			perCPU := parts[1:]
+			softirqs.IRQPoll = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "TASKLET:":
+			perCPU := parts[1:]
+			softirqs.Tasklet = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "SCHED:":
+			perCPU := parts[1:]
+			softirqs.Sched = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "HRTIMER:":
+			perCPU := parts[1:]
+			softirqs.HRTimer = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "RCU:":
+			perCPU := parts[1:]
+			softirqs.RCU = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
+	}
+
+	return softirqs, scanner.Err()
+}
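Each slice in Softirqs holds one counter per CPU in column order, so a system-wide total is just the sum of a slice. A fragment assuming fs from procfs.NewFS("/proc"):

sirq, err := fs.Softirqs()
if err != nil {
	log.Fatal(err)
}
var timerTotal uint64
for _, c := range sirq.Timer {
	timerTotal += c
}
fmt.Printf("TIMER softirqs across %d CPUs: %d\n", len(sirq.Timer), timerTotal)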
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000..e36b41c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,258 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/fs"
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CPUStat shows how much time the CPU spent in various stages.
+type CPUStat struct {
+	User      float64
+	Nice      float64
+	System    float64
+	Idle      float64
+	Iowait    float64
+	IRQ       float64
+	SoftIRQ   float64
+	Steal     float64
+	Guest     float64
+	GuestNice float64
+}
+
+// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading `/proc/softirqs`.
+type SoftIRQStat struct {
+	Hi          uint64
+	Timer       uint64
+	NetTx       uint64
+	NetRx       uint64
+	Block       uint64
+	BlockIoPoll uint64
+	Tasklet     uint64
+	Sched       uint64
+	Hrtimer     uint64
+	Rcu         uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+	// Boot time in seconds since the Epoch.
+	BootTime uint64
+	// Summed up cpu statistics.
+	CPUTotal CPUStat
+	// Per-CPU statistics.
+	CPU map[int64]CPUStat
+	// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+	IRQTotal uint64
+	// Number of times a numbered IRQ was triggered.
+	IRQ []uint64
+	// Number of times a context switch happened.
+	ContextSwitches uint64
+	// Number of times a process was created.
+	ProcessCreated uint64
+	// Number of processes currently running.
+	ProcessesRunning uint64
+	// Number of processes currently blocked (waiting for IO).
+	ProcessesBlocked uint64
+	// Number of times a softirq was scheduled.
+	SoftIRQTotal uint64
+	// Detailed softirq statistics.
+	SoftIRQ SoftIRQStat
+}
+
+// parseCPUStat parses a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+	cpuStat := CPUStat{}
+	var cpu string
+
+	count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+		&cpu,
+		&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+		&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+		&cpuStat.Guest, &cpuStat.GuestNice)
+
+	if err != nil && err != io.EOF {
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
+	}
+	if count == 0 {
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
+	}
+
+	cpuStat.User /= userHZ
+	cpuStat.Nice /= userHZ
+	cpuStat.System /= userHZ
+	cpuStat.Idle /= userHZ
+	cpuStat.Iowait /= userHZ
+	cpuStat.IRQ /= userHZ
+	cpuStat.SoftIRQ /= userHZ
+	cpuStat.Steal /= userHZ
+	cpuStat.Guest /= userHZ
+	cpuStat.GuestNice /= userHZ
+
+	if cpu == "cpu" {
+		return cpuStat, -1, nil
+	}
+
+	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+	if err != nil {
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
+	}
+
+	return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+	softIRQStat := SoftIRQStat{}
+	var total uint64
+	var prefix string
+
+	_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+		&prefix, &total,
+		&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+		&softIRQStat.Block, &softIRQStat.BlockIoPoll,
+		&softIRQStat.Tasklet, &softIRQStat.Sched,
+		&softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+	if err != nil {
+		return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
+	}
+
+	return softIRQStat, total, nil
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: Use fs.Stat() instead.
+func NewStat() (Stat, error) {
+	fs, err := NewFS(fs.DefaultProcMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+	return fs.Stat()
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: Use fs.Stat() instead.
+func (fs FS) NewStat() (Stat, error) {
+	return fs.Stat()
+}
+
+// Stat returns information about current cpu/process statistics.
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Stat() (Stat, error) {
+	fileName := fs.proc.Path("stat")
+	data, err := util.ReadFileNoStat(fileName)
+	if err != nil {
+		return Stat{}, err
+	}
+	procStat, err := parseStat(bytes.NewReader(data), fileName)
+	if err != nil {
+		return Stat{}, err
+	}
+	return procStat, nil
+}
+
+// parseStat parses the metrics from /proc/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+	var (
+		scanner = bufio.NewScanner(r)
+		stat    = Stat{
+			CPU: make(map[int64]CPUStat),
+		}
+		err error
+	)
+
+	// Increase default scanner buffer to handle very long `intr` lines.
+	buf := make([]byte, 0, 8*1024)
+	scanner.Buffer(buf, 1024*1024)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(scanner.Text())
+		// require at least <key> <value>
+		if len(parts) < 2 {
+			continue
+		}
+		switch {
+		case parts[0] == "btime":
+			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "intr":
+			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
+			}
+			numberedIRQs := parts[2:]
+			stat.IRQ = make([]uint64, len(numberedIRQs))
+			for i, count := range numberedIRQs {
+				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case parts[0] == "ctxt":
+			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "processes":
+			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "procs_running":
+			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "procs_blocked":
+			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "softirq":
+			softIRQStats, total, err := parseSoftIRQStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			stat.SoftIRQTotal = total
+			stat.SoftIRQ = softIRQStats
+		case strings.HasPrefix(parts[0], "cpu"):
+			cpuStat, cpuID, err := parseCPUStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			if cpuID == -1 {
+				stat.CPUTotal = cpuStat
+			} else {
+				stat.CPU[cpuID] = cpuStat
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
+	}
+
+	return stat, nil
+}
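A minimal usage sketch for the Stat API added above (illustrative only, not part of the vendored file); it assumes the importing module already depends on github.com/prometheus/procfs:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS points the package at /proc.
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// Per-CPU times are divided by USER_HZ during parsing, so they are in seconds.
	fmt.Printf("boot time: %d, ctxt switches: %d, cpu0 user: %.2fs\n",
		stat.BootTime, stat.ContextSwitches, stat.CPU[0].User)
}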
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
new file mode 100644
index 0000000..65fec83
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/swaps.go
@@ -0,0 +1,89 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Swap represents an entry in /proc/swaps.
+type Swap struct {
+	Filename string
+	Type     string
+	Size     int
+	Used     int
+	Priority int
+}
+
+// Swaps returns a slice of all configured swap devices on the system.
+func (fs FS) Swaps() ([]*Swap, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
+	if err != nil {
+		return nil, err
+	}
+	return parseSwaps(data)
+}
+
+func parseSwaps(info []byte) ([]*Swap, error) {
+	swaps := []*Swap{}
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	scanner.Scan() // ignore header line
+	for scanner.Scan() {
+		swapString := scanner.Text()
+		parsedSwap, err := parseSwapString(swapString)
+		if err != nil {
+			return nil, err
+		}
+		swaps = append(swaps, parsedSwap)
+	}
+
+	err := scanner.Err()
+	return swaps, err
+}
+
+func parseSwapString(swapString string) (*Swap, error) {
+	var err error
+
+	swapFields := strings.Fields(swapString)
+	swapLength := len(swapFields)
+	if swapLength < 5 {
+		return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
+	}
+
+	swap := &Swap{
+		Filename: swapFields[0],
+		Type:     swapFields[1],
+	}
+
+	swap.Size, err = strconv.Atoi(swapFields[2])
+	if err != nil {
+		return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
+	}
+	swap.Used, err = strconv.Atoi(swapFields[3])
+	if err != nil {
+		return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
+	}
+	swap.Priority, err = strconv.Atoi(swapFields[4])
+	if err != nil {
+		return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
+	}
+
+	return swap, nil
+}
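Illustrative only (not part of the vendored file): a short sketch of reading /proc/swaps through the Swaps API above.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	swaps, err := fs.Swaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range swaps {
		// Size and Used carry the kilobyte values reported by /proc/swaps.
		fmt.Printf("%s (%s): %d/%d kB, prio %d\n", s.Filename, s.Type, s.Used, s.Size, s.Priority)
	}
}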
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 0000000..80e0e94
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread-specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Procs{}, err
+	}
+	return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+	taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+	d, err := os.Open(taskPath)
+	if err != nil {
+		return Procs{}, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
+	}
+
+	t := Procs{}
+	for _, n := range names {
+		tid, err := strconv.ParseInt(n, 10, 64)
+		if err != nil {
+			continue
+		}
+
+		t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
+	}
+
+	return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+	taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+	if _, err := os.Stat(taskPath); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+	tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
+	if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: tid, fs: tfs}, nil
+}
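A hedged sketch (not part of the vendored file) of listing the threads of the current process with the helpers above:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range threads {
		// Each entry is a Proc rooted at /proc/<pid>/task, so the usual Proc accessors apply.
		comm, _ := t.Comm()
		fmt.Printf("tid %d: %s\n", t.PID, comm)
	}
}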
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
new file mode 100644
index 0000000..19ef02b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,413 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+#              - stores only filename, content, and mode
+#              - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'EOF', r'\EOF', line)
+    line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+    line = re.sub('\x00', r'NULLBYTE', line)
+    sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+    line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+    line = re.sub(r'([^\\])EOF', r'\1', line)
+    line = re.sub(r'\\EOF', 'EOF', line)
+    sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+    if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
+        echo "WARNING sed unable to handle null bytes, using Python (slow)."
+        if ! which python >/dev/null; then
+            echo "ERROR Python not found. Aborting."
+            exit 2
+        fi
+        USE_PYTHON=1
+    fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+    bname=$(basename "$0")
+    cat << USAGE
+Usage:   $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+         $bname            -t -f <ARCHIVE>           (list archive contents)
+         $bname [-C <DIR>] -x -f <ARCHIVE>           (extract archive)
+
+Options:
+         -C <DIR>           (change directory)
+         -v                 (verbose)
+         --recursive-unlink (recursively delete existing directory if path
+                             collides with file or directory to extract)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+         $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+    if [ "${VERBOSE:-}" == "yes" ]; then
+        echo >&7 "$@"
+    fi
+}
+
+function set_cmd {
+    if [ -n "$CMD" ]; then
+        echo "ERROR: more than one command given"
+        echo
+        usage 2
+    fi
+    CMD=$1
+}
+
+unset VERBOSE
+unset RECURSIVE_UNLINK
+
+while getopts :cf:-:htxvC: opt; do
+    case $opt in
+        c)
+            set_cmd "create"
+            ;;
+        f)
+            ARCHIVE=$OPTARG
+            ;;
+        h)
+            usage 0
+            ;;
+        t)
+            set_cmd "list"
+            ;;
+        x)
+            set_cmd "extract"
+            ;;
+        v)
+            VERBOSE=yes
+            exec 7>&1
+            ;;
+        C)
+            CDIR=$OPTARG
+            ;;
+        -)
+            case $OPTARG in
+                recursive-unlink)
+                    RECURSIVE_UNLINK="yes"
+                    ;;
+                *)
+                    echo -e "Error: invalid option -$OPTARG"
+                    echo
+                    usage 1
+                    ;;
+            esac
+            ;;
+        *)
+            echo >&2 "ERROR: invalid option -$OPTARG"
+            echo
+            usage 1
+            ;;
+    esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+    echo >&2 "ERROR: no command given"
+    echo
+    usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+    echo >&2 "ERROR: no archive name given"
+    echo
+    usage 1
+fi
+
+function list {
+    local path=""
+    local size=0
+    local line_no=0
+    local ttar_file=$1
+    if [ -n "${2:-}" ]; then
+        echo >&2 "ERROR: too many arguments."
+        echo
+        usage 1
+    fi
+    if [ ! -e "$ttar_file" ]; then
+        echo >&2 "ERROR: file not found ($ttar_file)"
+        echo
+        usage 1
+    fi
+    while read -r line; do
+        line_no=$(( line_no + 1 ))
+        if [ $size -gt 0 ]; then
+            size=$(( size - 1 ))
+            continue
+        fi
+        if [[ $line =~ ^Path:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+        elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+            size=${BASH_REMATCH[1]}
+            echo "$path"
+        elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            echo "$path/"
+        elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+            echo  "$path -> ${BASH_REMATCH[1]}"
+        fi
+    done < "$ttar_file"
+}
+
+function extract {
+    local path=""
+    local size=0
+    local line_no=0
+    local ttar_file=$1
+    if [ -n "${2:-}" ]; then
+        echo >&2 "ERROR: too many arguments."
+        echo
+        usage 1
+    fi
+    if [ ! -e "$ttar_file" ]; then
+        echo >&2 "ERROR: file not found ($ttar_file)"
+        echo
+        usage 1
+    fi
+    while IFS= read -r line; do
+        line_no=$(( line_no + 1 ))
+        local eof_without_newline
+        if [ "$size" -gt 0 ]; then
+            if [[ "$line" =~ [^\\]EOF ]]; then
+                # An EOF not preceded by a backslash indicates that the line
+                # does not end with a newline
+                eof_without_newline=1
+            else
+                eof_without_newline=0
+            fi
+            # Replace NULLBYTE with null byte if at beginning of line
+            # Replace NULLBYTE with null byte unless preceded by backslash
+            # Remove one backslash in front of NULLBYTE (if any)
+            # Remove EOF unless preceded by backslash
+            # Remove one backslash in front of EOF
+            if [ $USE_PYTHON -eq 1 ]; then
+                echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
+            else
+                # The repeated pattern makes up for sed's lack of negative
+                # lookbehind assertions (for consecutive null bytes).
+                echo -n "$line" | \
+                    sed -e 's/^NULLBYTE/\x0/g;
+                            s/\([^\\]\)NULLBYTE/\1\x0/g;
+                            s/\([^\\]\)NULLBYTE/\1\x0/g;
+                            s/\\NULLBYTE/NULLBYTE/g;
+                            s/\([^\\]\)EOF/\1/g;
+                            s/\\EOF/EOF/g;
+                    ' >> "$path"
+            fi
+            if [[ "$eof_without_newline" -eq 0 ]]; then
+                echo >> "$path"
+            fi
+            size=$(( size - 1 ))
+            continue
+        fi
+        if [[ $line =~ ^Path:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            if [ -L "$path" ]; then
+                rm "$path"
+            elif [ -d "$path" ]; then
+                if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
+                    rm -r "$path"
+                else
+                    # Safe because symlinks to directories are dealt with above
+                    rmdir "$path"
+                fi
+            elif [ -e "$path" ]; then
+                rm "$path"
+            fi
+        elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+            size=${BASH_REMATCH[1]}
+            # Create file even if it is zero-length.
+            touch "$path"
+            vecho "    $path"
+        elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
+            mode=${BASH_REMATCH[1]}
+            chmod "$mode" "$path"
+            vecho "$mode"
+        elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            mkdir -p "$path"
+            vecho "    $path/"
+        elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+            ln -s "${BASH_REMATCH[1]}" "$path"
+            vecho "    $path -> ${BASH_REMATCH[1]}"
+        elif [[ $line =~ ^# ]]; then
+            # Ignore comments between files
+            continue
+        else
+            echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
+            exit 1
+        fi
+    done < "$ttar_file"
+}
+
+function div {
+    echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
+         "- - - - - -"
+}
+
+function get_mode {
+    local mfile=$1
+    if [ -z "${STAT_OPTION:-}" ]; then
+        if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+            # GNU stat
+            STAT_OPTION='-c'
+            STAT_FORMAT='%a'
+        else
+            # BSD stat
+            STAT_OPTION='-f'
+            # Octal output, user/group/other (omit file type, sticky bit)
+            STAT_FORMAT='%OLp'
+        fi
+    fi
+    stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
+}
+
+function _create {
+    shopt -s nullglob
+    local mode
+    local eof_without_newline
+    while (( "$#" )); do
+        file=$1
+        if [ -L "$file" ]; then
+            echo "Path: $file"
+            symlinkTo=$(readlink "$file")
+            echo "SymlinkTo: $symlinkTo"
+            vecho "    $file -> $symlinkTo"
+            div
+        elif [ -d "$file" ]; then
+            # Strip trailing slash (if there is one)
+            file=${file%/}
+            echo "Directory: $file"
+            mode=$(get_mode "$file")
+            echo "Mode: $mode"
+            vecho "$mode $file/"
+            div
+            # Find all files and dirs, including hidden/dot files
+            for x in "$file/"{*,.[^.]*}; do
+                _create "$x"
+            done
+        elif [ -f "$file" ]; then
+            echo "Path: $file"
+            lines=$(wc -l "$file"|awk '{print $1}')
+            eof_without_newline=0
+            if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
+                    [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
+                eof_without_newline=1
+                lines=$((lines+1))
+            fi
+            echo "Lines: $lines"
+            # Add backslash in front of EOF
+            # Add backslash in front of NULLBYTE
+            # Replace null byte with NULLBYTE
+            if [ $USE_PYTHON -eq 1 ]; then
+                < "$file" python -c "$PYTHON_CREATE_FILTER"
+            else
+                < "$file" \
+                    sed 's/EOF/\\EOF/g;
+                            s/NULLBYTE/\\NULLBYTE/g;
+                            s/\x0/NULLBYTE/g;
+                    '
+            fi
+            if [[ "$eof_without_newline" -eq 1 ]]; then
+                # Finish line with EOF to indicate that the original line did
+                # not end with a linefeed
+                echo "EOF"
+            fi
+            mode=$(get_mode "$file")
+            echo "Mode: $mode"
+            vecho "$mode $file"
+            div
+        else
+            echo >&2 "ERROR: file not found ($file in $(pwd))"
+            exit 2
+        fi
+        shift
+    done
+}
+
+function create {
+    ttar_file=$1
+    shift
+    if [ -z "${1:-}" ]; then
+        echo >&2 "ERROR: missing arguments."
+        echo
+        usage 1
+    fi
+    if [ -e "$ttar_file" ]; then
+        rm "$ttar_file"
+    fi
+    exec > "$ttar_file"
+    echo "# Archive created by ttar $ARG_STRING"
+    _create "$@"
+}
+
+test_environment
+
+if [ -n "${CDIR:-}" ]; then
+    if [[ "$ARCHIVE" != /* ]]; then
+        # Relative path: preserve the archive's location before changing
+        # directory
+        ARCHIVE="$(pwd)/$ARCHIVE"
+    fi
+    cd "$CDIR"
+fi
+
+"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
new file mode 100644
index 0000000..51c49d8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -0,0 +1,212 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// The VM interface is described at
+//
+//	https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// Each setting is exposed as a single file.
+// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
+// and numa_zonelist_order (deprecated) which is a string.
+type VM struct {
+	AdminReserveKbytes        *int64   // /proc/sys/vm/admin_reserve_kbytes
+	BlockDump                 *int64   // /proc/sys/vm/block_dump
+	CompactUnevictableAllowed *int64   // /proc/sys/vm/compact_unevictable_allowed
+	DirtyBackgroundBytes      *int64   // /proc/sys/vm/dirty_background_bytes
+	DirtyBackgroundRatio      *int64   // /proc/sys/vm/dirty_background_ratio
+	DirtyBytes                *int64   // /proc/sys/vm/dirty_bytes
+	DirtyExpireCentisecs      *int64   // /proc/sys/vm/dirty_expire_centisecs
+	DirtyRatio                *int64   // /proc/sys/vm/dirty_ratio
+	DirtytimeExpireSeconds    *int64   // /proc/sys/vm/dirtytime_expire_seconds
+	DirtyWritebackCentisecs   *int64   // /proc/sys/vm/dirty_writeback_centisecs
+	DropCaches                *int64   // /proc/sys/vm/drop_caches
+	ExtfragThreshold          *int64   // /proc/sys/vm/extfrag_threshold
+	HugetlbShmGroup           *int64   // /proc/sys/vm/hugetlb_shm_group
+	LaptopMode                *int64   // /proc/sys/vm/laptop_mode
+	LegacyVaLayout            *int64   // /proc/sys/vm/legacy_va_layout
+	LowmemReserveRatio        []*int64 // /proc/sys/vm/lowmem_reserve_ratio
+	MaxMapCount               *int64   // /proc/sys/vm/max_map_count
+	MemoryFailureEarlyKill    *int64   // /proc/sys/vm/memory_failure_early_kill
+	MemoryFailureRecovery     *int64   // /proc/sys/vm/memory_failure_recovery
+	MinFreeKbytes             *int64   // /proc/sys/vm/min_free_kbytes
+	MinSlabRatio              *int64   // /proc/sys/vm/min_slab_ratio
+	MinUnmappedRatio          *int64   // /proc/sys/vm/min_unmapped_ratio
+	MmapMinAddr               *int64   // /proc/sys/vm/mmap_min_addr
+	NrHugepages               *int64   // /proc/sys/vm/nr_hugepages
+	NrHugepagesMempolicy      *int64   // /proc/sys/vm/nr_hugepages_mempolicy
+	NrOvercommitHugepages     *int64   // /proc/sys/vm/nr_overcommit_hugepages
+	NumaStat                  *int64   // /proc/sys/vm/numa_stat
+	NumaZonelistOrder         string   // /proc/sys/vm/numa_zonelist_order
+	OomDumpTasks              *int64   // /proc/sys/vm/oom_dump_tasks
+	OomKillAllocatingTask     *int64   // /proc/sys/vm/oom_kill_allocating_task
+	OvercommitKbytes          *int64   // /proc/sys/vm/overcommit_kbytes
+	OvercommitMemory          *int64   // /proc/sys/vm/overcommit_memory
+	OvercommitRatio           *int64   // /proc/sys/vm/overcommit_ratio
+	PageCluster               *int64   // /proc/sys/vm/page-cluster
+	PanicOnOom                *int64   // /proc/sys/vm/panic_on_oom
+	PercpuPagelistFraction    *int64   // /proc/sys/vm/percpu_pagelist_fraction
+	StatInterval              *int64   // /proc/sys/vm/stat_interval
+	Swappiness                *int64   // /proc/sys/vm/swappiness
+	UserReserveKbytes         *int64   // /proc/sys/vm/user_reserve_kbytes
+	VfsCachePressure          *int64   // /proc/sys/vm/vfs_cache_pressure
+	WatermarkBoostFactor      *int64   // /proc/sys/vm/watermark_boost_factor
+	WatermarkScaleFactor      *int64   // /proc/sys/vm/watermark_scale_factor
+	ZoneReclaimMode           *int64   // /proc/sys/vm/zone_reclaim_mode
+}
+
+// VM reads the VM statistics from the specified `proc` filesystem.
+func (fs FS) VM() (*VM, error) {
+	path := fs.proc.Path("sys/vm")
+	file, err := os.Stat(path)
+	if err != nil {
+		return nil, err
+	}
+	if !file.Mode().IsDir() {
+		return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
+	}
+
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+
+	var vm VM
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+
+		name := filepath.Join(path, f.Name())
+		// Ignore errors on read; some files in
+		// /proc/sys/vm are write-only.
+		value, err := util.SysReadFile(name)
+		if err != nil {
+			continue
+		}
+		vp := util.NewValueParser(value)
+
+		switch f.Name() {
+		case "admin_reserve_kbytes":
+			vm.AdminReserveKbytes = vp.PInt64()
+		case "block_dump":
+			vm.BlockDump = vp.PInt64()
+		case "compact_unevictable_allowed":
+			vm.CompactUnevictableAllowed = vp.PInt64()
+		case "dirty_background_bytes":
+			vm.DirtyBackgroundBytes = vp.PInt64()
+		case "dirty_background_ratio":
+			vm.DirtyBackgroundRatio = vp.PInt64()
+		case "dirty_bytes":
+			vm.DirtyBytes = vp.PInt64()
+		case "dirty_expire_centisecs":
+			vm.DirtyExpireCentisecs = vp.PInt64()
+		case "dirty_ratio":
+			vm.DirtyRatio = vp.PInt64()
+		case "dirtytime_expire_seconds":
+			vm.DirtytimeExpireSeconds = vp.PInt64()
+		case "dirty_writeback_centisecs":
+			vm.DirtyWritebackCentisecs = vp.PInt64()
+		case "drop_caches":
+			vm.DropCaches = vp.PInt64()
+		case "extfrag_threshold":
+			vm.ExtfragThreshold = vp.PInt64()
+		case "hugetlb_shm_group":
+			vm.HugetlbShmGroup = vp.PInt64()
+		case "laptop_mode":
+			vm.LaptopMode = vp.PInt64()
+		case "legacy_va_layout":
+			vm.LegacyVaLayout = vp.PInt64()
+		case "lowmem_reserve_ratio":
+			stringSlice := strings.Fields(value)
+			pint64Slice := make([]*int64, 0, len(stringSlice))
+			for _, value := range stringSlice {
+				vp := util.NewValueParser(value)
+				pint64Slice = append(pint64Slice, vp.PInt64())
+			}
+			vm.LowmemReserveRatio = pint64Slice
+		case "max_map_count":
+			vm.MaxMapCount = vp.PInt64()
+		case "memory_failure_early_kill":
+			vm.MemoryFailureEarlyKill = vp.PInt64()
+		case "memory_failure_recovery":
+			vm.MemoryFailureRecovery = vp.PInt64()
+		case "min_free_kbytes":
+			vm.MinFreeKbytes = vp.PInt64()
+		case "min_slab_ratio":
+			vm.MinSlabRatio = vp.PInt64()
+		case "min_unmapped_ratio":
+			vm.MinUnmappedRatio = vp.PInt64()
+		case "mmap_min_addr":
+			vm.MmapMinAddr = vp.PInt64()
+		case "nr_hugepages":
+			vm.NrHugepages = vp.PInt64()
+		case "nr_hugepages_mempolicy":
+			vm.NrHugepagesMempolicy = vp.PInt64()
+		case "nr_overcommit_hugepages":
+			vm.NrOvercommitHugepages = vp.PInt64()
+		case "numa_stat":
+			vm.NumaStat = vp.PInt64()
+		case "numa_zonelist_order":
+			vm.NumaZonelistOrder = value
+		case "oom_dump_tasks":
+			vm.OomDumpTasks = vp.PInt64()
+		case "oom_kill_allocating_task":
+			vm.OomKillAllocatingTask = vp.PInt64()
+		case "overcommit_kbytes":
+			vm.OvercommitKbytes = vp.PInt64()
+		case "overcommit_memory":
+			vm.OvercommitMemory = vp.PInt64()
+		case "overcommit_ratio":
+			vm.OvercommitRatio = vp.PInt64()
+		case "page-cluster":
+			vm.PageCluster = vp.PInt64()
+		case "panic_on_oom":
+			vm.PanicOnOom = vp.PInt64()
+		case "percpu_pagelist_fraction":
+			vm.PercpuPagelistFraction = vp.PInt64()
+		case "stat_interval":
+			vm.StatInterval = vp.PInt64()
+		case "swappiness":
+			vm.Swappiness = vp.PInt64()
+		case "user_reserve_kbytes":
+			vm.UserReserveKbytes = vp.PInt64()
+		case "vfs_cache_pressure":
+			vm.VfsCachePressure = vp.PInt64()
+		case "watermark_boost_factor":
+			vm.WatermarkBoostFactor = vp.PInt64()
+		case "watermark_scale_factor":
+			vm.WatermarkScaleFactor = vp.PInt64()
+		case "zone_reclaim_mode":
+			vm.ZoneReclaimMode = vp.PInt64()
+		}
+		if err := vp.Err(); err != nil {
+			return nil, err
+		}
+	}
+
+	return &vm, nil
+}
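Illustrative only: reading a couple of the /proc/sys/vm settings exposed by the struct above. The fields are pointers because individual files may be absent or unreadable.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	if vm.Swappiness != nil {
		fmt.Println("swappiness:", *vm.Swappiness)
	}
	if vm.OvercommitMemory != nil {
		fmt.Println("overcommit_memory:", *vm.OvercommitMemory)
	}
}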
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
new file mode 100644
index 0000000..e54d94b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Zoneinfo holds info parsed from /proc/zoneinfo.
+type Zoneinfo struct {
+	Node                       string
+	Zone                       string
+	NrFreePages                *int64
+	Min                        *int64
+	Low                        *int64
+	High                       *int64
+	Scanned                    *int64
+	Spanned                    *int64
+	Present                    *int64
+	Managed                    *int64
+	NrActiveAnon               *int64
+	NrInactiveAnon             *int64
+	NrIsolatedAnon             *int64
+	NrAnonPages                *int64
+	NrAnonTransparentHugepages *int64
+	NrActiveFile               *int64
+	NrInactiveFile             *int64
+	NrIsolatedFile             *int64
+	NrFilePages                *int64
+	NrSlabReclaimable          *int64
+	NrSlabUnreclaimable        *int64
+	NrMlockStack               *int64
+	NrKernelStack              *int64
+	NrMapped                   *int64
+	NrDirty                    *int64
+	NrWriteback                *int64
+	NrUnevictable              *int64
+	NrShmem                    *int64
+	NrDirtied                  *int64
+	NrWritten                  *int64
+	NumaHit                    *int64
+	NumaMiss                   *int64
+	NumaForeign                *int64
+	NumaInterleave             *int64
+	NumaLocal                  *int64
+	NumaOther                  *int64
+	Protection                 []*int64
+}
+
+var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
+
+// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
+// structs containing the relevant info.  More information available here:
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
+	data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
+	if err != nil {
+		return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
+	}
+	zoneinfo, err := parseZoneinfo(data)
+	if err != nil {
+		return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
+	}
+	return zoneinfo, nil
+}
+
+func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
+
+	zoneinfo := []Zoneinfo{}
+
+	zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
+	for _, block := range zoneinfoBlocks {
+		var zoneinfoElement Zoneinfo
+		lines := strings.Split(string(block), "\n")
+		for _, line := range lines {
+
+			if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
+				zoneinfoElement.Node = nodeZone[1]
+				zoneinfoElement.Zone = nodeZone[2]
+				continue
+			}
+			if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
+				continue
+			}
+			parts := strings.Fields(strings.TrimSpace(line))
+			if len(parts) < 2 {
+				continue
+			}
+			vp := util.NewValueParser(parts[1])
+			switch parts[0] {
+			case "nr_free_pages":
+				zoneinfoElement.NrFreePages = vp.PInt64()
+			case "min":
+				zoneinfoElement.Min = vp.PInt64()
+			case "low":
+				zoneinfoElement.Low = vp.PInt64()
+			case "high":
+				zoneinfoElement.High = vp.PInt64()
+			case "scanned":
+				zoneinfoElement.Scanned = vp.PInt64()
+			case "spanned":
+				zoneinfoElement.Spanned = vp.PInt64()
+			case "present":
+				zoneinfoElement.Present = vp.PInt64()
+			case "managed":
+				zoneinfoElement.Managed = vp.PInt64()
+			case "nr_active_anon":
+				zoneinfoElement.NrActiveAnon = vp.PInt64()
+			case "nr_inactive_anon":
+				zoneinfoElement.NrInactiveAnon = vp.PInt64()
+			case "nr_isolated_anon":
+				zoneinfoElement.NrIsolatedAnon = vp.PInt64()
+			case "nr_anon_pages":
+				zoneinfoElement.NrAnonPages = vp.PInt64()
+			case "nr_anon_transparent_hugepages":
+				zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
+			case "nr_active_file":
+				zoneinfoElement.NrActiveFile = vp.PInt64()
+			case "nr_inactive_file":
+				zoneinfoElement.NrInactiveFile = vp.PInt64()
+			case "nr_isolated_file":
+				zoneinfoElement.NrIsolatedFile = vp.PInt64()
+			case "nr_file_pages":
+				zoneinfoElement.NrFilePages = vp.PInt64()
+			case "nr_slab_reclaimable":
+				zoneinfoElement.NrSlabReclaimable = vp.PInt64()
+			case "nr_slab_unreclaimable":
+				zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
+			case "nr_mlock_stack":
+				zoneinfoElement.NrMlockStack = vp.PInt64()
+			case "nr_kernel_stack":
+				zoneinfoElement.NrKernelStack = vp.PInt64()
+			case "nr_mapped":
+				zoneinfoElement.NrMapped = vp.PInt64()
+			case "nr_dirty":
+				zoneinfoElement.NrDirty = vp.PInt64()
+			case "nr_writeback":
+				zoneinfoElement.NrWriteback = vp.PInt64()
+			case "nr_unevictable":
+				zoneinfoElement.NrUnevictable = vp.PInt64()
+			case "nr_shmem":
+				zoneinfoElement.NrShmem = vp.PInt64()
+			case "nr_dirtied":
+				zoneinfoElement.NrDirtied = vp.PInt64()
+			case "nr_written":
+				zoneinfoElement.NrWritten = vp.PInt64()
+			case "numa_hit":
+				zoneinfoElement.NumaHit = vp.PInt64()
+			case "numa_miss":
+				zoneinfoElement.NumaMiss = vp.PInt64()
+			case "numa_foreign":
+				zoneinfoElement.NumaForeign = vp.PInt64()
+			case "numa_interleave":
+				zoneinfoElement.NumaInterleave = vp.PInt64()
+			case "numa_local":
+				zoneinfoElement.NumaLocal = vp.PInt64()
+			case "numa_other":
+				zoneinfoElement.NumaOther = vp.PInt64()
+			case "protection:":
+				protectionParts := strings.Split(line, ":")
+				protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
+				protectionValues = strings.Replace(protectionValues, ")", "", 1)
+				protectionValues = strings.TrimSpace(protectionValues)
+				protectionStringMap := strings.Split(protectionValues, ", ")
+				val, err := util.ParsePInt64s(protectionStringMap)
+				if err == nil {
+					zoneinfoElement.Protection = val
+				}
+			}
+
+		}
+
+		zoneinfo = append(zoneinfo, zoneinfoElement)
+	}
+	return zoneinfo, nil
+}
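A minimal sketch (not part of the vendored file) of walking the parsed zones returned by the Zoneinfo API above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	zones, err := fs.Zoneinfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones {
		if z.NrFreePages != nil {
			fmt.Printf("node %s zone %s: %d free pages\n", z.Node, z.Zone, *z.NrFreePages)
		}
	}
}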
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
index 27ddfee..6492bfe 100644
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -7,6 +7,15 @@
 
 Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
 
+Archived as of April 1, 2025
+-----
+This repository is no longer maintained. The authors recommend you explore the
+following newer, more widely adopted libraries for your Go instrumentation
+needs:
+
+* [OpenTelemetry Go SDK](https://opentelemetry.io/docs/languages/go/instrumentation/#metrics)
+* [Prometheus Go Client Library](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus)
+
 Usage
 -----
 
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 95d8e59..ffb24e8 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -7,10 +7,13 @@
 	"time"
 )
 
-type CompareType int
+// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it.
+type CompareType = compareResult
+
+type compareResult int
 
 const (
-	compareLess CompareType = iota - 1
+	compareLess compareResult = iota - 1
 	compareEqual
 	compareGreater
 )
@@ -28,6 +31,8 @@
 	uint32Type = reflect.TypeOf(uint32(1))
 	uint64Type = reflect.TypeOf(uint64(1))
 
+	uintptrType = reflect.TypeOf(uintptr(1))
+
 	float32Type = reflect.TypeOf(float32(1))
 	float64Type = reflect.TypeOf(float64(1))
 
@@ -37,7 +42,7 @@
 	bytesType = reflect.TypeOf([]byte{})
 )
 
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) {
 	obj1Value := reflect.ValueOf(obj1)
 	obj2Value := reflect.ValueOf(obj2)
 
@@ -308,11 +313,11 @@
 	case reflect.Struct:
 		{
 			// All structs enter here. We're not interested in most types.
-			if !canConvert(obj1Value, timeType) {
+			if !obj1Value.CanConvert(timeType) {
 				break
 			}
 
-			// time.Time can compared!
+			// time.Time can be compared!
 			timeObj1, ok := obj1.(time.Time)
 			if !ok {
 				timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
@@ -323,12 +328,18 @@
 				timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
 			}
 
-			return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+			if timeObj1.Before(timeObj2) {
+				return compareLess, true
+			}
+			if timeObj1.Equal(timeObj2) {
+				return compareEqual, true
+			}
+			return compareGreater, true
 		}
 	case reflect.Slice:
 		{
 			// We only care about the []byte type.
-			if !canConvert(obj1Value, bytesType) {
+			if !obj1Value.CanConvert(bytesType) {
 				break
 			}
 
@@ -343,7 +354,27 @@
 				bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
 			}
 
-			return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+			return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true
+		}
+	case reflect.Uintptr:
+		{
+			uintptrObj1, ok := obj1.(uintptr)
+			if !ok {
+				uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
+			}
+			uintptrObj2, ok := obj2.(uintptr)
+			if !ok {
+				uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
+			}
+			if uintptrObj1 > uintptrObj2 {
+				return compareGreater, true
+			}
+			if uintptrObj1 == uintptrObj2 {
+				return compareEqual, true
+			}
+			if uintptrObj1 < uintptrObj2 {
+				return compareLess, true
+			}
 		}
 	}
 
@@ -352,79 +383,85 @@
 
 // Greater asserts that the first element is greater than the second
 //
-//    assert.Greater(t, 2, 1)
-//    assert.Greater(t, float64(2), float64(1))
-//    assert.Greater(t, "b", "a")
+//	assert.Greater(t, 2, 1)
+//	assert.Greater(t, float64(2), float64(1))
+//	assert.Greater(t, "b", "a")
 func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2)
+	return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...)
 }
 
 // GreaterOrEqual asserts that the first element is greater than or equal to the second
 //
-//    assert.GreaterOrEqual(t, 2, 1)
-//    assert.GreaterOrEqual(t, 2, 2)
-//    assert.GreaterOrEqual(t, "b", "a")
-//    assert.GreaterOrEqual(t, "b", "b")
+//	assert.GreaterOrEqual(t, 2, 1)
+//	assert.GreaterOrEqual(t, 2, 2)
+//	assert.GreaterOrEqual(t, "b", "a")
+//	assert.GreaterOrEqual(t, "b", "b")
 func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2)
+	return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...)
 }
 
 // Less asserts that the first element is less than the second
 //
-//    assert.Less(t, 1, 2)
-//    assert.Less(t, float64(1), float64(2))
-//    assert.Less(t, "a", "b")
+//	assert.Less(t, 1, 2)
+//	assert.Less(t, float64(1), float64(2))
+//	assert.Less(t, "a", "b")
 func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2)
+	return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...)
 }
 
 // LessOrEqual asserts that the first element is less than or equal to the second
 //
-//    assert.LessOrEqual(t, 1, 2)
-//    assert.LessOrEqual(t, 2, 2)
-//    assert.LessOrEqual(t, "a", "b")
-//    assert.LessOrEqual(t, "b", "b")
+//	assert.LessOrEqual(t, 1, 2)
+//	assert.LessOrEqual(t, 2, 2)
+//	assert.LessOrEqual(t, "a", "b")
+//	assert.LessOrEqual(t, "b", "b")
 func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2)
+	return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...)
 }
 
 // Positive asserts that the specified element is positive
 //
-//    assert.Positive(t, 1)
-//    assert.Positive(t, 1.23)
+//	assert.Positive(t, 1)
+//	assert.Positive(t, 1.23)
 func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not positive", e)
+	return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...)
 }
 
 // Negative asserts that the specified element is negative
 //
-//    assert.Negative(t, -1)
-//    assert.Negative(t, -1.23)
+//	assert.Negative(t, -1)
+//	assert.Negative(t, -1.23)
 func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not negative", e)
+	return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...)
 }
 
-func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
@@ -437,17 +474,17 @@
 
 	compareResult, isComparable := compare(e1, e2, e1Kind)
 	if !isComparable {
-		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+		return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...)
 	}
 
 	if !containsValue(allowedComparesResults, compareResult) {
-		return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+		return Fail(t, failMessage, msgAndArgs...)
 	}
 
 	return true
 }
 
-func containsValue(values []CompareType, value CompareType) bool {
+func containsValue(values []compareResult, value compareResult) bool {
 	for _, v := range values {
 		if v == value {
 			return true
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
deleted file mode 100644
index da86790..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build go1.17
-// +build go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-//       merged/removed with assertion_compare_go1.17_test.go and
-//       assertion_compare_legacy.go
-
-package assert
-
-import "reflect"
-
-// Wrapper around reflect.Value.CanConvert, for compatibility
-// reasons.
-func canConvert(value reflect.Value, to reflect.Type) bool {
-	return value.CanConvert(to)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
deleted file mode 100644
index 1701af2..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build !go1.17
-// +build !go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-//       merged/removed with assertion_compare_go1.17_test.go and
-//       assertion_compare_can_convert.go
-
-package assert
-
-import "reflect"
-
-// Older versions of Go does not have the reflect.Value.CanConvert
-// method.
-func canConvert(value reflect.Value, to reflect.Type) bool {
-	return false
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 7880b8f..c592f6a 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
 
 package assert
 
@@ -22,9 +19,9 @@
 // Containsf asserts that the specified string, list(array, slice...) or map contains the
 // specified substring or element.
 //
-//    assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
-//    assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
-//    assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+//	assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+//	assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+//	assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
 func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -53,10 +50,19 @@
 	return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
 }
 
-// Emptyf asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
 //
-//  assert.Emptyf(t, obj, "error message %s", "formatted")
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+//	assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
 func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -66,7 +72,7 @@
 
 // Equalf asserts that two objects are equal.
 //
-//    assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//	assert.Equalf(t, 123, 123, "error message %s", "formatted")
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses). Function equality
@@ -81,8 +87,8 @@
 // EqualErrorf asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
-//   actualObj, err := SomeFunction()
-//   assert.EqualErrorf(t, err,  expectedErrorString, "error message %s", "formatted")
+//	actualObj, err := SomeFunction()
+//	assert.EqualErrorf(t, err,  expectedErrorString, "error message %s", "formatted")
 func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -90,10 +96,27 @@
 	return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
 }
 
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualExportedValuesf asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
 //
-//    assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+//	 type S struct {
+//		Exported     	int
+//		notExported   	int
+//	 }
+//	 assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+//	 assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
+//
+//	assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
 func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -103,10 +126,8 @@
 
 // Errorf asserts that a function returned an error (i.e. not `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if assert.Errorf(t, err, "error message %s", "formatted") {
-// 	   assert.Equal(t, expectedErrorf, err)
-//   }
+//	actualObj, err := SomeFunction()
+//	assert.Errorf(t, err, "error message %s", "formatted")
 func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -126,8 +147,8 @@
 // ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
 // and that the error contains the specified substring.
 //
-//   actualObj, err := SomeFunction()
-//   assert.ErrorContainsf(t, err,  expectedErrorSubString, "error message %s", "formatted")
+//	actualObj, err := SomeFunction()
+//	assert.ErrorContainsf(t, err,  expectedErrorSubString, "error message %s", "formatted")
 func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -147,7 +168,7 @@
 // Eventuallyf asserts that given condition will be met in waitFor time,
 // periodically checking target function each tick.
 //
-//    assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+//	assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
 func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -155,9 +176,34 @@
 	return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
 }
 
+// EventuallyWithTf asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+//	externalValue := false
+//	go func() {
+//		time.Sleep(8*time.Second)
+//		externalValue = true
+//	}()
+//	assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") {
+//		// add assertions as needed; any assertion failure will fail the current tick
+//		assert.True(c, externalValue, "expected 'externalValue' to be true")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
 // Exactlyf asserts that two objects are equal in value and type.
 //
-//    assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+//	assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
 func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -183,7 +229,7 @@
 
 // Falsef asserts that the specified value is false.
 //
-//    assert.Falsef(t, myBool, "error message %s", "formatted")
+//	assert.Falsef(t, myBool, "error message %s", "formatted")
 func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -202,9 +248,9 @@
 
 // Greaterf asserts that the first element is greater than the second
 //
-//    assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-//    assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
-//    assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+//	assert.Greaterf(t, 2, 1, "error message %s", "formatted")
+//	assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+//	assert.Greaterf(t, "b", "a", "error message %s", "formatted")
 func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -214,10 +260,10 @@
 
 // GreaterOrEqualf asserts that the first element is greater than or equal to the second
 //
-//    assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
-//    assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
-//    assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
-//    assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+//	assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+//	assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+//	assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+//	assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
 func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -228,7 +274,7 @@
 // HTTPBodyContainsf asserts that a specified handler returns a
 // body that contains a string.
 //
-//  assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//	assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -241,7 +287,7 @@
 // HTTPBodyNotContainsf asserts that a specified handler returns a
 // body that does not contain a string.
 //
-//  assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//	assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -253,7 +299,7 @@
 
 // HTTPErrorf asserts that a specified handler returns an error status code.
 //
-//  assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -265,7 +311,7 @@
 
 // HTTPRedirectf asserts that a specified handler returns a redirect status code.
 //
-//  assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -277,7 +323,7 @@
 
 // HTTPStatusCodef asserts that a specified handler returns a specified status code.
 //
-//  assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//	assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
@@ -289,7 +335,7 @@
 
 // HTTPSuccessf asserts that a specified handler returns a success status code.
 //
-//  assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//	assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -301,7 +347,7 @@
 
 // Implementsf asserts that an object is implemented by the specified interface.
 //
-//    assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+//	assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
 func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -311,7 +357,7 @@
 
 // InDeltaf asserts that the two numerals are within delta of each other.
 //
-// 	 assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+//	assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
 func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -353,9 +399,9 @@
 
 // IsDecreasingf asserts that the collection is decreasing
 //
-//    assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
-//    assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
-//    assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+//	assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+//	assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
+//	assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
 func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -365,9 +411,9 @@
 
 // IsIncreasingf asserts that the collection is increasing
 //
-//    assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
-//    assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
-//    assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+//	assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+//	assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
+//	assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
 func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -377,9 +423,9 @@
 
 // IsNonDecreasingf asserts that the collection is not decreasing
 //
-//    assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
-//    assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
-//    assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+//	assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+//	assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
+//	assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
 func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -389,9 +435,9 @@
 
 // IsNonIncreasingf asserts that the collection is not increasing
 //
-//    assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
-//    assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
-//    assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+//	assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+//	assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
+//	assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
 func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -399,7 +445,19 @@
 	return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
 }
 
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+//	assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...)
+}
+
 // IsTypef asserts that the specified objects are of the same type.
+//
+//	assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
 func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -409,7 +467,7 @@
 
 // JSONEqf asserts that two JSON strings are equivalent.
 //
-//  assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//	assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
 func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -420,7 +478,7 @@
 // Lenf asserts that the specified object has specific length.
 // Lenf also fails if the object has a type that len() not accept.
 //
-//    assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+//	assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
 func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -430,9 +488,9 @@
 
 // Lessf asserts that the first element is less than the second
 //
-//    assert.Lessf(t, 1, 2, "error message %s", "formatted")
-//    assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
-//    assert.Lessf(t, "a", "b", "error message %s", "formatted")
+//	assert.Lessf(t, 1, 2, "error message %s", "formatted")
+//	assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+//	assert.Lessf(t, "a", "b", "error message %s", "formatted")
 func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -442,10 +500,10 @@
 
 // LessOrEqualf asserts that the first element is less than or equal to the second
 //
-//    assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
-//    assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
-//    assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
-//    assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+//	assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+//	assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+//	assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+//	assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
 func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -455,8 +513,8 @@
 
 // Negativef asserts that the specified element is negative
 //
-//    assert.Negativef(t, -1, "error message %s", "formatted")
-//    assert.Negativef(t, -1.23, "error message %s", "formatted")
+//	assert.Negativef(t, -1, "error message %s", "formatted")
+//	assert.Negativef(t, -1.23, "error message %s", "formatted")
 func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -467,7 +525,7 @@
 // Neverf asserts that the given condition doesn't satisfy in waitFor time,
 // periodically checking the target function each tick.
 //
-//    assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+//	assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
 func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -477,7 +535,7 @@
 
 // Nilf asserts that the specified object is nil.
 //
-//    assert.Nilf(t, err, "error message %s", "formatted")
+//	assert.Nilf(t, err, "error message %s", "formatted")
 func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -496,10 +554,10 @@
 
 // NoErrorf asserts that a function returned no error (i.e. `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if assert.NoErrorf(t, err, "error message %s", "formatted") {
-// 	   assert.Equal(t, expectedObj, actualObj)
-//   }
+//	  actualObj, err := SomeFunction()
+//	  if assert.NoErrorf(t, err, "error message %s", "formatted") {
+//		   assert.Equal(t, expectedObj, actualObj)
+//	  }
 func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -519,9 +577,9 @@
 // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
-//    assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
-//    assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
-//    assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+//	assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+//	assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+//	assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
 func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -529,12 +587,28 @@
 	return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
 }
 
-// NotEmptyf asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
 //
-//  if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
-//    assert.Equal(t, "two", obj[1])
-//  }
+// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT [Empty].
+//
+//	if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+//	  assert.Equal(t, "two", obj[1])
+//	}
 func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -544,7 +618,7 @@
 
 // NotEqualf asserts that the specified values are NOT equal.
 //
-//    assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//	assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses).
@@ -557,7 +631,7 @@
 
 // NotEqualValuesf asserts that two objects are not equal even when converted to the same type
 //
-//    assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+//	assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
 func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -565,7 +639,16 @@
 	return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorAsf asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -574,9 +657,19 @@
 	return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
 }
 
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+//	assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
 // NotNilf asserts that the specified object is not nil.
 //
-//    assert.NotNilf(t, err, "error message %s", "formatted")
+//	assert.NotNilf(t, err, "error message %s", "formatted")
 func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -586,7 +679,7 @@
 
 // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
 //
-//   assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+//	assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
 func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -596,8 +689,8 @@
 
 // NotRegexpf asserts that a specified regexp does not match a string.
 //
-//  assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-//  assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+//	assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//	assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
 func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -607,7 +700,7 @@
 
 // NotSamef asserts that two pointers do not reference the same object.
 //
-//    assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//	assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -618,10 +711,15 @@
 	return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//	assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+//	assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+//	assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
 func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -639,7 +737,7 @@
 
 // Panicsf asserts that the code inside the specified PanicTestFunc panics.
 //
-//   assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+//	assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
 func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -651,7 +749,7 @@
 // panics, and that the recovered panic value is an error that satisfies the
 // EqualError comparison.
 //
-//   assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//	assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
 func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -662,7 +760,7 @@
 // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
-//   assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//	assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
 func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -672,8 +770,8 @@
 
 // Positivef asserts that the specified element is positive
 //
-//    assert.Positivef(t, 1, "error message %s", "formatted")
-//    assert.Positivef(t, 1.23, "error message %s", "formatted")
+//	assert.Positivef(t, 1, "error message %s", "formatted")
+//	assert.Positivef(t, 1.23, "error message %s", "formatted")
 func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -683,8 +781,8 @@
 
 // Regexpf asserts that a specified regexp matches a string.
 //
-//  assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-//  assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+//	assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//	assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
 func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -694,7 +792,7 @@
 
 // Samef asserts that two pointers reference the same object.
 //
-//    assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+//	assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -705,10 +803,15 @@
 	return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//	assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+//	assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+//	assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
 func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -718,7 +821,7 @@
 
 // Truef asserts that the specified value is true.
 //
-//    assert.Truef(t, myBool, "error message %s", "formatted")
+//	assert.Truef(t, myBool, "error message %s", "formatted")
 func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -728,7 +831,7 @@
 
 // WithinDurationf asserts that the two times are within duration delta of each other.
 //
-//   assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//	assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
 func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -738,7 +841,7 @@
 
 // WithinRangef asserts that a time is within a time range (inclusive).
 //
-//   assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+//	assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
 func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 339515b..58db928 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
 
 package assert
 
@@ -30,9 +27,9 @@
 // Contains asserts that the specified string, list(array, slice...) or map contains the
 // specified substring or element.
 //
-//    a.Contains("Hello World", "World")
-//    a.Contains(["Hello", "World"], "World")
-//    a.Contains({"Hello": "World"}, "Hello")
+//	a.Contains("Hello World", "World")
+//	a.Contains(["Hello", "World"], "World")
+//	a.Contains({"Hello": "World"}, "Hello")
 func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -43,9 +40,9 @@
 // Containsf asserts that the specified string, list(array, slice...) or map contains the
 // specified substring or element.
 //
-//    a.Containsf("Hello World", "World", "error message %s", "formatted")
-//    a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
-//    a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+//	a.Containsf("Hello World", "World", "error message %s", "formatted")
+//	a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+//	a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
 func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -95,10 +92,19 @@
 	return ElementsMatchf(a.t, listA, listB, msg, args...)
 }
 
-// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
 //
-//  a.Empty(obj)
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+//	a.Empty(obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
 func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -106,10 +112,19 @@
 	return Empty(a.t, object, msgAndArgs...)
 }
 
-// Emptyf asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
 //
-//  a.Emptyf(obj, "error message %s", "formatted")
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+//	a.Emptyf(obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
 func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -119,7 +134,7 @@
 
 // Equal asserts that two objects are equal.
 //
-//    a.Equal(123, 123)
+//	a.Equal(123, 123)
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses). Function equality
@@ -134,8 +149,8 @@
 // EqualError asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
-//   actualObj, err := SomeFunction()
-//   a.EqualError(err,  expectedErrorString)
+//	actualObj, err := SomeFunction()
+//	a.EqualError(err,  expectedErrorString)
 func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -146,8 +161,8 @@
 // EqualErrorf asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
-//   actualObj, err := SomeFunction()
-//   a.EqualErrorf(err,  expectedErrorString, "error message %s", "formatted")
+//	actualObj, err := SomeFunction()
+//	a.EqualErrorf(err,  expectedErrorString, "error message %s", "formatted")
 func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -155,10 +170,44 @@
 	return EqualErrorf(a.t, theError, errString, msg, args...)
 }
 
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualExportedValues asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
 //
-//    a.EqualValues(uint32(123), int32(123))
+//	 type S struct {
+//		Exported     	int
+//		notExported   	int
+//	 }
+//	 a.EqualExportedValues(S{1, 2}, S{1, 3}) => true
+//	 a.EqualExportedValues(S{1, 2}, S{2, 3}) => false
+func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualExportedValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualExportedValuesf asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+//	 type S struct {
+//		Exported     	int
+//		notExported   	int
+//	 }
+//	 a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+//	 a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualExportedValuesf(a.t, expected, actual, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
+//
+//	a.EqualValues(uint32(123), int32(123))
 func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -166,10 +215,10 @@
 	return EqualValues(a.t, expected, actual, msgAndArgs...)
 }
 
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
 //
-//    a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+//	a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
 func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -179,7 +228,7 @@
 
 // Equalf asserts that two objects are equal.
 //
-//    a.Equalf(123, 123, "error message %s", "formatted")
+//	a.Equalf(123, 123, "error message %s", "formatted")
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses). Function equality
@@ -193,10 +242,8 @@
 
 // Error asserts that a function returned an error (i.e. not `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if a.Error(err) {
-// 	   assert.Equal(t, expectedError, err)
-//   }
+//	actualObj, err := SomeFunction()
+//	a.Error(err)
 func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -225,8 +272,8 @@
 // ErrorContains asserts that a function returned an error (i.e. not `nil`)
 // and that the error contains the specified substring.
 //
-//   actualObj, err := SomeFunction()
-//   a.ErrorContains(err,  expectedErrorSubString)
+//	actualObj, err := SomeFunction()
+//	a.ErrorContains(err,  expectedErrorSubString)
 func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -237,8 +284,8 @@
 // ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
 // and that the error contains the specified substring.
 //
-//   actualObj, err := SomeFunction()
-//   a.ErrorContainsf(err,  expectedErrorSubString, "error message %s", "formatted")
+//	actualObj, err := SomeFunction()
+//	a.ErrorContainsf(err,  expectedErrorSubString, "error message %s", "formatted")
 func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -266,10 +313,8 @@
 
 // Errorf asserts that a function returned an error (i.e. not `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if a.Errorf(err, "error message %s", "formatted") {
-// 	   assert.Equal(t, expectedErrorf, err)
-//   }
+//	actualObj, err := SomeFunction()
+//	a.Errorf(err, "error message %s", "formatted")
 func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -280,7 +325,7 @@
 // Eventually asserts that given condition will be met in waitFor time,
 // periodically checking target function each tick.
 //
-//    a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+//	a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
 func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -288,10 +333,60 @@
 	return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
 }
 
+// EventuallyWithT asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+//	externalValue := false
+//	go func() {
+//		time.Sleep(8*time.Second)
+//		externalValue = true
+//	}()
+//	a.EventuallyWithT(func(c *assert.CollectT) {
+//		// add assertions as needed; any assertion failure will fail the current tick
+//		assert.True(c, externalValue, "expected 'externalValue' to be true")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// EventuallyWithTf asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+//	externalValue := false
+//	go func() {
+//		time.Sleep(8*time.Second)
+//		externalValue = true
+//	}()
+//	a.EventuallyWithTf(func(c *assert.CollectT) {
+//		// add assertions as needed; any assertion failure will fail the current tick
+//		assert.True(c, externalValue, "expected 'externalValue' to be true")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...)
+}
+
 // Eventuallyf asserts that given condition will be met in waitFor time,
 // periodically checking target function each tick.
 //
-//    a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+//	a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
 func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -301,7 +396,7 @@
 
 // Exactly asserts that two objects are equal in value and type.
 //
-//    a.Exactly(int32(123), int64(123))
+//	a.Exactly(int32(123), int64(123))
 func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -311,7 +406,7 @@
 
 // Exactlyf asserts that two objects are equal in value and type.
 //
-//    a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+//	a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
 func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -353,7 +448,7 @@
 
 // False asserts that the specified value is false.
 //
-//    a.False(myBool)
+//	a.False(myBool)
 func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -363,7 +458,7 @@
 
 // Falsef asserts that the specified value is false.
 //
-//    a.Falsef(myBool, "error message %s", "formatted")
+//	a.Falsef(myBool, "error message %s", "formatted")
 func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -391,9 +486,9 @@
 
 // Greater asserts that the first element is greater than the second
 //
-//    a.Greater(2, 1)
-//    a.Greater(float64(2), float64(1))
-//    a.Greater("b", "a")
+//	a.Greater(2, 1)
+//	a.Greater(float64(2), float64(1))
+//	a.Greater("b", "a")
 func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -403,10 +498,10 @@
 
 // GreaterOrEqual asserts that the first element is greater than or equal to the second
 //
-//    a.GreaterOrEqual(2, 1)
-//    a.GreaterOrEqual(2, 2)
-//    a.GreaterOrEqual("b", "a")
-//    a.GreaterOrEqual("b", "b")
+//	a.GreaterOrEqual(2, 1)
+//	a.GreaterOrEqual(2, 2)
+//	a.GreaterOrEqual("b", "a")
+//	a.GreaterOrEqual("b", "b")
 func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -416,10 +511,10 @@
 
 // GreaterOrEqualf asserts that the first element is greater than or equal to the second
 //
-//    a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
-//    a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
-//    a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
-//    a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+//	a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+//	a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+//	a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+//	a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
 func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -429,9 +524,9 @@
 
 // Greaterf asserts that the first element is greater than the second
 //
-//    a.Greaterf(2, 1, "error message %s", "formatted")
-//    a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
-//    a.Greaterf("b", "a", "error message %s", "formatted")
+//	a.Greaterf(2, 1, "error message %s", "formatted")
+//	a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+//	a.Greaterf("b", "a", "error message %s", "formatted")
 func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -442,7 +537,7 @@
 // HTTPBodyContains asserts that a specified handler returns a
 // body that contains a string.
 //
-//  a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//	a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -455,7 +550,7 @@
 // HTTPBodyContainsf asserts that a specified handler returns a
 // body that contains a string.
 //
-//  a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//	a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -468,7 +563,7 @@
 // HTTPBodyNotContains asserts that a specified handler returns a
 // body that does not contain a string.
 //
-//  a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//	a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -481,7 +576,7 @@
 // HTTPBodyNotContainsf asserts that a specified handler returns a
 // body that does not contain a string.
 //
-//  a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//	a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -493,7 +588,7 @@
 
 // HTTPError asserts that a specified handler returns an error status code.
 //
-//  a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -505,7 +600,7 @@
 
 // HTTPErrorf asserts that a specified handler returns an error status code.
 //
-//  a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -517,7 +612,7 @@
 
 // HTTPRedirect asserts that a specified handler returns a redirect status code.
 //
-//  a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -529,7 +624,7 @@
 
 // HTTPRedirectf asserts that a specified handler returns a redirect status code.
 //
-//  a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -541,7 +636,7 @@
 
 // HTTPStatusCode asserts that a specified handler returns a specified status code.
 //
-//  a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//	a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
@@ -553,7 +648,7 @@
 
 // HTTPStatusCodef asserts that a specified handler returns a specified status code.
 //
-//  a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//	a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
@@ -565,7 +660,7 @@
 
 // HTTPSuccess asserts that a specified handler returns a success status code.
 //
-//  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//	a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -577,7 +672,7 @@
 
 // HTTPSuccessf asserts that a specified handler returns a success status code.
 //
-//  a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//	a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -589,7 +684,7 @@
 
 // Implements asserts that an object is implemented by the specified interface.
 //
-//    a.Implements((*MyInterface)(nil), new(MyObject))
+//	a.Implements((*MyInterface)(nil), new(MyObject))
 func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -599,7 +694,7 @@
 
 // Implementsf asserts that an object is implemented by the specified interface.
 //
-//    a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+//	a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
 func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -609,7 +704,7 @@
 
 // InDelta asserts that the two numerals are within delta of each other.
 //
-// 	 a.InDelta(math.Pi, 22/7.0, 0.01)
+//	a.InDelta(math.Pi, 22/7.0, 0.01)
 func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -651,7 +746,7 @@
 
 // InDeltaf asserts that the two numerals are within delta of each other.
 //
-// 	 a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+//	a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
 func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -693,9 +788,9 @@
 
 // IsDecreasing asserts that the collection is decreasing
 //
-//    a.IsDecreasing([]int{2, 1, 0})
-//    a.IsDecreasing([]float{2, 1})
-//    a.IsDecreasing([]string{"b", "a"})
+//	a.IsDecreasing([]int{2, 1, 0})
+//	a.IsDecreasing([]float{2, 1})
+//	a.IsDecreasing([]string{"b", "a"})
 func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -705,9 +800,9 @@
 
 // IsDecreasingf asserts that the collection is decreasing
 //
-//    a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
-//    a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
-//    a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
+//	a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
+//	a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
+//	a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
 func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -717,9 +812,9 @@
 
 // IsIncreasing asserts that the collection is increasing
 //
-//    a.IsIncreasing([]int{1, 2, 3})
-//    a.IsIncreasing([]float{1, 2})
-//    a.IsIncreasing([]string{"a", "b"})
+//	a.IsIncreasing([]int{1, 2, 3})
+//	a.IsIncreasing([]float{1, 2})
+//	a.IsIncreasing([]string{"a", "b"})
 func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -729,9 +824,9 @@
 
 // IsIncreasingf asserts that the collection is increasing
 //
-//    a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
-//    a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
-//    a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
+//	a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
+//	a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
+//	a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
 func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -741,9 +836,9 @@
 
 // IsNonDecreasing asserts that the collection is not decreasing
 //
-//    a.IsNonDecreasing([]int{1, 1, 2})
-//    a.IsNonDecreasing([]float{1, 2})
-//    a.IsNonDecreasing([]string{"a", "b"})
+//	a.IsNonDecreasing([]int{1, 1, 2})
+//	a.IsNonDecreasing([]float{1, 2})
+//	a.IsNonDecreasing([]string{"a", "b"})
 func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -753,9 +848,9 @@
 
 // IsNonDecreasingf asserts that the collection is not decreasing
 //
-//    a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
-//    a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
-//    a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
+//	a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
+//	a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
+//	a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
 func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -765,9 +860,9 @@
 
 // IsNonIncreasing asserts that the collection is not increasing
 //
-//    a.IsNonIncreasing([]int{2, 1, 1})
-//    a.IsNonIncreasing([]float{2, 1})
-//    a.IsNonIncreasing([]string{"b", "a"})
+//	a.IsNonIncreasing([]int{2, 1, 1})
+//	a.IsNonIncreasing([]float{2, 1})
+//	a.IsNonIncreasing([]string{"b", "a"})
 func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -777,9 +872,9 @@
 
 // IsNonIncreasingf asserts that the collection is not increasing
 //
-//    a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
-//    a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
-//    a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
+//	a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
+//	a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
+//	a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
 func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -787,7 +882,29 @@
 	return IsNonIncreasingf(a.t, object, msg, args...)
 }
 
+// IsNotType asserts that the specified objects are not of the same type.
+//
+//	a.IsNotType(&NotMyStruct{}, &MyStruct{})
+func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsNotType(a.t, theType, object, msgAndArgs...)
+}
+
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+//	a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsNotTypef(a.t, theType, object, msg, args...)
+}
+
 // IsType asserts that the specified objects are of the same type.
+//
+//	a.IsType(&MyStruct{}, &MyStruct{})
 func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -796,6 +913,8 @@
 }
 
 // IsTypef asserts that the specified objects are of the same type.
+//
+//	a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
 func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -805,7 +924,7 @@
 
 // JSONEq asserts that two JSON strings are equivalent.
 //
-//  a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//	a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
 func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -815,7 +934,7 @@
 
 // JSONEqf asserts that two JSON strings are equivalent.
 //
-//  a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//	a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
 func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -826,7 +945,7 @@
 // Len asserts that the specified object has specific length.
 // Len also fails if the object has a type that len() not accept.
 //
-//    a.Len(mySlice, 3)
+//	a.Len(mySlice, 3)
 func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -837,7 +956,7 @@
 // Lenf asserts that the specified object has specific length.
 // Lenf also fails if the object has a type that len() not accept.
 //
-//    a.Lenf(mySlice, 3, "error message %s", "formatted")
+//	a.Lenf(mySlice, 3, "error message %s", "formatted")
 func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -847,9 +966,9 @@
 
 // Less asserts that the first element is less than the second
 //
-//    a.Less(1, 2)
-//    a.Less(float64(1), float64(2))
-//    a.Less("a", "b")
+//	a.Less(1, 2)
+//	a.Less(float64(1), float64(2))
+//	a.Less("a", "b")
 func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -859,10 +978,10 @@
 
 // LessOrEqual asserts that the first element is less than or equal to the second
 //
-//    a.LessOrEqual(1, 2)
-//    a.LessOrEqual(2, 2)
-//    a.LessOrEqual("a", "b")
-//    a.LessOrEqual("b", "b")
+//	a.LessOrEqual(1, 2)
+//	a.LessOrEqual(2, 2)
+//	a.LessOrEqual("a", "b")
+//	a.LessOrEqual("b", "b")
 func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -872,10 +991,10 @@
 
 // LessOrEqualf asserts that the first element is less than or equal to the second
 //
-//    a.LessOrEqualf(1, 2, "error message %s", "formatted")
-//    a.LessOrEqualf(2, 2, "error message %s", "formatted")
-//    a.LessOrEqualf("a", "b", "error message %s", "formatted")
-//    a.LessOrEqualf("b", "b", "error message %s", "formatted")
+//	a.LessOrEqualf(1, 2, "error message %s", "formatted")
+//	a.LessOrEqualf(2, 2, "error message %s", "formatted")
+//	a.LessOrEqualf("a", "b", "error message %s", "formatted")
+//	a.LessOrEqualf("b", "b", "error message %s", "formatted")
 func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -885,9 +1004,9 @@
 
 // Lessf asserts that the first element is less than the second
 //
-//    a.Lessf(1, 2, "error message %s", "formatted")
-//    a.Lessf(float64(1), float64(2), "error message %s", "formatted")
-//    a.Lessf("a", "b", "error message %s", "formatted")
+//	a.Lessf(1, 2, "error message %s", "formatted")
+//	a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+//	a.Lessf("a", "b", "error message %s", "formatted")
 func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -897,8 +1016,8 @@
 
 // Negative asserts that the specified element is negative
 //
-//    a.Negative(-1)
-//    a.Negative(-1.23)
+//	a.Negative(-1)
+//	a.Negative(-1.23)
 func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -908,8 +1027,8 @@
 
 // Negativef asserts that the specified element is negative
 //
-//    a.Negativef(-1, "error message %s", "formatted")
-//    a.Negativef(-1.23, "error message %s", "formatted")
+//	a.Negativef(-1, "error message %s", "formatted")
+//	a.Negativef(-1.23, "error message %s", "formatted")
 func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -920,7 +1039,7 @@
 // Never asserts that the given condition doesn't satisfy in waitFor time,
 // periodically checking the target function each tick.
 //
-//    a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+//	a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
 func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -931,7 +1050,7 @@
 // Neverf asserts that the given condition doesn't satisfy in waitFor time,
 // periodically checking the target function each tick.
 //
-//    a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+//	a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
 func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -941,7 +1060,7 @@
 
 // Nil asserts that the specified object is nil.
 //
-//    a.Nil(err)
+//	a.Nil(err)
 func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -951,7 +1070,7 @@
 
 // Nilf asserts that the specified object is nil.
 //
-//    a.Nilf(err, "error message %s", "formatted")
+//	a.Nilf(err, "error message %s", "formatted")
 func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -979,10 +1098,10 @@
 
 // NoError asserts that a function returned no error (i.e. `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if a.NoError(err) {
-// 	   assert.Equal(t, expectedObj, actualObj)
-//   }
+//	  actualObj, err := SomeFunction()
+//	  if a.NoError(err) {
+//		   assert.Equal(t, expectedObj, actualObj)
+//	  }
 func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -992,10 +1111,10 @@
 
 // NoErrorf asserts that a function returned no error (i.e. `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if a.NoErrorf(err, "error message %s", "formatted") {
-// 	   assert.Equal(t, expectedObj, actualObj)
-//   }
+//	  actualObj, err := SomeFunction()
+//	  if a.NoErrorf(err, "error message %s", "formatted") {
+//		   assert.Equal(t, expectedObj, actualObj)
+//	  }
 func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1024,9 +1143,9 @@
 // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
-//    a.NotContains("Hello World", "Earth")
-//    a.NotContains(["Hello", "World"], "Earth")
-//    a.NotContains({"Hello": "World"}, "Earth")
+//	a.NotContains("Hello World", "Earth")
+//	a.NotContains(["Hello", "World"], "Earth")
+//	a.NotContains({"Hello": "World"}, "Earth")
 func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1037,9 +1156,9 @@
 // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
-//    a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
-//    a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
-//    a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+//	a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+//	a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+//	a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
 func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1047,12 +1166,45 @@
 	return NotContainsf(a.t, s, contains, msg, args...)
 }
 
-// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
 //
-//  if a.NotEmpty(obj) {
-//    assert.Equal(t, "two", obj[1])
-//  }
+// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
+func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotElementsMatchf(a.t, listA, listB, msg, args...)
+}
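As an illustrative sketch (assuming the same testing/assert imports as the earlier example), the new NotElementsMatch forwarder could be exercised like this:

	func TestNotElementsMatchSketch(t *testing.T) {
		a := assert.New(t)
		a.NotElementsMatch([]int{1, 1, 2, 3}, []int{1, 2, 3}) // passes: duplicate counts differ
		a.NotElementsMatch([]int{1, 2, 3}, []int{1, 2, 4})    // passes: elements differ
		// a.NotElementsMatch([]int{1, 2, 3}, []int{3, 2, 1}) would fail: same elements, order is ignored
	}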
+
+// NotEmpty asserts that the specified object is NOT [Empty].
+//
+//	if a.NotEmpty(obj) {
+//	  assert.Equal(t, "two", obj[1])
+//	}
 func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1060,12 +1212,11 @@
 	return NotEmpty(a.t, object, msgAndArgs...)
 }
 
-// NotEmptyf asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
 //
-//  if a.NotEmptyf(obj, "error message %s", "formatted") {
-//    assert.Equal(t, "two", obj[1])
-//  }
+//	if a.NotEmptyf(obj, "error message %s", "formatted") {
+//	  assert.Equal(t, "two", obj[1])
+//	}
 func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1075,7 +1226,7 @@
 
 // NotEqual asserts that the specified values are NOT equal.
 //
-//    a.NotEqual(obj1, obj2)
+//	a.NotEqual(obj1, obj2)
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses).
@@ -1088,7 +1239,7 @@
 
 // NotEqualValues asserts that two objects are not equal even when converted to the same type
 //
-//    a.NotEqualValues(obj1, obj2)
+//	a.NotEqualValues(obj1, obj2)
 func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1098,7 +1249,7 @@
 
 // NotEqualValuesf asserts that two objects are not equal even when converted to the same type
 //
-//    a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+//	a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
 func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1108,7 +1259,7 @@
 
 // NotEqualf asserts that the specified values are NOT equal.
 //
-//    a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//	a.NotEqualf(obj1, obj2, "error message %s", "formatted")
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses).
@@ -1119,7 +1270,25 @@
 	return NotEqualf(a.t, expected, actual, msg, args...)
 }
 
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target.
+// If one does match, the assertion fails and target is set to that error value.
+func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target.
+// If one does match, the assertion fails and target is set to that error value.
+func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotErrorAsf(a.t, err, target, msg, args...)
+}
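A minimal sketch of the new NotErrorAs forwarder (assumes the usual testing, fmt, io/fs, and assert imports; timeoutError is a hypothetical example type):

	type timeoutError struct{}

	func (timeoutError) Error() string { return "timeout" }

	func TestNotErrorAsSketch(t *testing.T) {
		a := assert.New(t)
		err := fmt.Errorf("wrapped: %w", timeoutError{})

		var pathErr *fs.PathError
		a.NotErrorAs(err, &pathErr) // passes: nothing in the chain is a *fs.PathError

		// var te timeoutError
		// a.NotErrorAs(err, &te) // would fail and set te, because the chain does contain a timeoutError
	}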
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
@@ -1128,7 +1297,7 @@
 	return NotErrorIs(a.t, err, target, msgAndArgs...)
 }
 
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
@@ -1137,9 +1306,29 @@
 	return NotErrorIsf(a.t, err, target, msg, args...)
 }
 
+// NotImplements asserts that an object does not implement the specified interface.
+//
+//	a.NotImplements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+//	a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotImplementsf(a.t, interfaceObject, object, msg, args...)
+}
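An illustrative use of the new NotImplements forwarder (assumes testing, fmt, errors, and assert imports):

	func TestNotImplementsSketch(t *testing.T) {
		a := assert.New(t)
		a.NotImplements((*fmt.Stringer)(nil), 42)       // passes: int has no String() method
		a.Implements((*error)(nil), errors.New("boom")) // passes: the value implements error
	}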
+
 // NotNil asserts that the specified object is not nil.
 //
-//    a.NotNil(err)
+//	a.NotNil(err)
 func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1149,7 +1338,7 @@
 
 // NotNilf asserts that the specified object is not nil.
 //
-//    a.NotNilf(err, "error message %s", "formatted")
+//	a.NotNilf(err, "error message %s", "formatted")
 func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1159,7 +1348,7 @@
 
 // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
 //
-//   a.NotPanics(func(){ RemainCalm() })
+//	a.NotPanics(func(){ RemainCalm() })
 func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1169,7 +1358,7 @@
 
 // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
 //
-//   a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+//	a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
 func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1179,8 +1368,8 @@
 
 // NotRegexp asserts that a specified regexp does not match a string.
 //
-//  a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
-//  a.NotRegexp("^start", "it's not starting")
+//	a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+//	a.NotRegexp("^start", "it's not starting")
 func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1190,8 +1379,8 @@
 
 // NotRegexpf asserts that a specified regexp does not match a string.
 //
-//  a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-//  a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+//	a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//	a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
 func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1201,7 +1390,7 @@
 
 // NotSame asserts that two pointers do not reference the same object.
 //
-//    a.NotSame(ptr1, ptr2)
+//	a.NotSame(ptr1, ptr2)
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -1214,7 +1403,7 @@
 
 // NotSamef asserts that two pointers do not reference the same object.
 //
-//    a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//	a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -1225,10 +1414,15 @@
 	return NotSamef(a.t, expected, actual, msg, args...)
 }
 
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//	a.NotSubset([1, 3, 4], [1, 2])
+//	a.NotSubset({"x": 1, "y": 2}, {"z": 3})
+//	a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
+//	a.NotSubset({"x": 1, "y": 2}, ["z"])
 func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1236,10 +1430,15 @@
 	return NotSubset(a.t, list, subset, msgAndArgs...)
 }
 
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//	a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+//	a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+//	a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
 func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1265,7 +1464,7 @@
 
 // Panics asserts that the code inside the specified PanicTestFunc panics.
 //
-//   a.Panics(func(){ GoCrazy() })
+//	a.Panics(func(){ GoCrazy() })
 func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1277,7 +1476,7 @@
 // panics, and that the recovered panic value is an error that satisfies the
 // EqualError comparison.
 //
-//   a.PanicsWithError("crazy error", func(){ GoCrazy() })
+//	a.PanicsWithError("crazy error", func(){ GoCrazy() })
 func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1289,7 +1488,7 @@
 // panics, and that the recovered panic value is an error that satisfies the
 // EqualError comparison.
 //
-//   a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//	a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
 func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1300,7 +1499,7 @@
 // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
-//   a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+//	a.PanicsWithValue("crazy error", func(){ GoCrazy() })
 func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1311,7 +1510,7 @@
 // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
-//   a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//	a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
 func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1321,7 +1520,7 @@
 
 // Panicsf asserts that the code inside the specified PanicTestFunc panics.
 //
-//   a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+//	a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
 func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1331,8 +1530,8 @@
 
 // Positive asserts that the specified element is positive
 //
-//    a.Positive(1)
-//    a.Positive(1.23)
+//	a.Positive(1)
+//	a.Positive(1.23)
 func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1342,8 +1541,8 @@
 
 // Positivef asserts that the specified element is positive
 //
-//    a.Positivef(1, "error message %s", "formatted")
-//    a.Positivef(1.23, "error message %s", "formatted")
+//	a.Positivef(1, "error message %s", "formatted")
+//	a.Positivef(1.23, "error message %s", "formatted")
 func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1353,8 +1552,8 @@
 
 // Regexp asserts that a specified regexp matches a string.
 //
-//  a.Regexp(regexp.MustCompile("start"), "it's starting")
-//  a.Regexp("start...$", "it's not starting")
+//	a.Regexp(regexp.MustCompile("start"), "it's starting")
+//	a.Regexp("start...$", "it's not starting")
 func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1364,8 +1563,8 @@
 
 // Regexpf asserts that a specified regexp matches a string.
 //
-//  a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-//  a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+//	a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//	a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
 func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1375,7 +1574,7 @@
 
 // Same asserts that two pointers reference the same object.
 //
-//    a.Same(ptr1, ptr2)
+//	a.Same(ptr1, ptr2)
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -1388,7 +1587,7 @@
 
 // Samef asserts that two pointers reference the same object.
 //
-//    a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//	a.Samef(ptr1, ptr2, "error message %s", "formatted")
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -1399,10 +1598,15 @@
 	return Samef(a.t, expected, actual, msg, args...)
 }
 
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//	a.Subset([1, 2, 3], [1, 2])
+//	a.Subset({"x": 1, "y": 2}, {"x": 1})
+//	a.Subset([1, 2, 3], {1: "one", 2: "two"})
+//	a.Subset({"x": 1, "y": 2}, ["x"])
 func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1410,10 +1614,15 @@
 	return Subset(a.t, list, subset, msgAndArgs...)
 }
 
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//	a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+//	a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+//	a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
 func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1423,7 +1632,7 @@
 
 // True asserts that the specified value is true.
 //
-//    a.True(myBool)
+//	a.True(myBool)
 func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1433,7 +1642,7 @@
 
 // Truef asserts that the specified value is true.
 //
-//    a.Truef(myBool, "error message %s", "formatted")
+//	a.Truef(myBool, "error message %s", "formatted")
 func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1443,7 +1652,7 @@
 
 // WithinDuration asserts that the two times are within duration delta of each other.
 //
-//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+//	a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
 func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1453,7 +1662,7 @@
 
 // WithinDurationf asserts that the two times are within duration delta of each other.
 //
-//   a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//	a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
 func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1463,7 +1672,7 @@
 
 // WithinRange asserts that a time is within a time range (inclusive).
 //
-//   a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+//	a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
 func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1473,7 +1682,7 @@
 
 // WithinRangef asserts that a time is within a time range (inclusive).
 //
-//   a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+//	a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
 func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 7594487..2fdf80f 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -6,7 +6,7 @@
 )
 
 // isOrdered checks that collection contains orderable elements.
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
 	objKind := reflect.TypeOf(object).Kind()
 	if objKind != reflect.Slice && objKind != reflect.Array {
 		return false
@@ -33,7 +33,7 @@
 		compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
 
 		if !isComparable {
-			return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+			return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...)
 		}
 
 		if !containsValue(allowedComparesResults, compareResult) {
@@ -46,36 +46,36 @@
 
 // IsIncreasing asserts that the collection is increasing
 //
-//    assert.IsIncreasing(t, []int{1, 2, 3})
-//    assert.IsIncreasing(t, []float{1, 2})
-//    assert.IsIncreasing(t, []string{"a", "b"})
+//	assert.IsIncreasing(t, []int{1, 2, 3})
+//	assert.IsIncreasing(t, []float64{1, 2})
+//	assert.IsIncreasing(t, []string{"a", "b"})
 func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+	return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
 }
 
 // IsNonIncreasing asserts that the collection is not increasing
 //
-//    assert.IsNonIncreasing(t, []int{2, 1, 1})
-//    assert.IsNonIncreasing(t, []float{2, 1})
-//    assert.IsNonIncreasing(t, []string{"b", "a"})
+//	assert.IsNonIncreasing(t, []int{2, 1, 1})
+//	assert.IsNonIncreasing(t, []float64{2, 1})
+//	assert.IsNonIncreasing(t, []string{"b", "a"})
 func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+	return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
 }
 
 // IsDecreasing asserts that the collection is decreasing
 //
-//    assert.IsDecreasing(t, []int{2, 1, 0})
-//    assert.IsDecreasing(t, []float{2, 1})
-//    assert.IsDecreasing(t, []string{"b", "a"})
+//	assert.IsDecreasing(t, []int{2, 1, 0})
+//	assert.IsDecreasing(t, []float64{2, 1})
+//	assert.IsDecreasing(t, []string{"b", "a"})
 func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+	return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
 }
 
 // IsNonDecreasing asserts that the collection is not decreasing
 //
-//    assert.IsNonDecreasing(t, []int{1, 1, 2})
-//    assert.IsNonDecreasing(t, []float{1, 2})
-//    assert.IsNonDecreasing(t, []string{"a", "b"})
+//	assert.IsNonDecreasing(t, []int{1, 1, 2})
+//	assert.IsNonDecreasing(t, []float64{1, 2})
+//	assert.IsNonDecreasing(t, []string{"a", "b"})
 func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+	return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
 }
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index fa1245b..de8de0c 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -8,7 +8,6 @@
 	"fmt"
 	"math"
 	"os"
-	"path/filepath"
 	"reflect"
 	"regexp"
 	"runtime"
@@ -20,7 +19,9 @@
 
 	"github.com/davecgh/go-spew/spew"
 	"github.com/pmezard/go-difflib/difflib"
-	yaml "gopkg.in/yaml.v3"
+
+	// Wrapper around gopkg.in/yaml.v3
+	"github.com/stretchr/testify/assert/yaml"
 )
 
 //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -46,6 +47,10 @@
 // for table driven tests.
 type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
 
+// PanicAssertionFunc is a common function prototype when validating a panic value.  Can be useful
+// for table driven tests.
+type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool
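A sketch of the table-driven usage this alias is meant for (assumes testing and assert imports; the test cases are illustrative):

	func TestPanicAssertionFuncSketch(t *testing.T) {
		tests := []struct {
			name      string
			f         assert.PanicTestFunc
			assertion assert.PanicAssertionFunc
		}{
			{"panics", func() { panic("boom") }, assert.Panics},
			{"does not panic", func() {}, assert.NotPanics},
		}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				tt.assertion(t, tt.f)
			})
		}
	}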
+
 // Comparison is a custom function that returns true on success and false on failure
 type Comparison func() (success bool)
 
@@ -76,6 +81,84 @@
 	return bytes.Equal(exp, act)
 }
 
+// copyExportedFields iterates downward through nested data structures and creates a copy
+// that only contains the exported struct fields.
+func copyExportedFields(expected interface{}) interface{} {
+	if isNil(expected) {
+		return expected
+	}
+
+	expectedType := reflect.TypeOf(expected)
+	expectedKind := expectedType.Kind()
+	expectedValue := reflect.ValueOf(expected)
+
+	switch expectedKind {
+	case reflect.Struct:
+		result := reflect.New(expectedType).Elem()
+		for i := 0; i < expectedType.NumField(); i++ {
+			field := expectedType.Field(i)
+			isExported := field.IsExported()
+			if isExported {
+				fieldValue := expectedValue.Field(i)
+				if isNil(fieldValue) || isNil(fieldValue.Interface()) {
+					continue
+				}
+				newValue := copyExportedFields(fieldValue.Interface())
+				result.Field(i).Set(reflect.ValueOf(newValue))
+			}
+		}
+		return result.Interface()
+
+	case reflect.Ptr:
+		result := reflect.New(expectedType.Elem())
+		unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface())
+		result.Elem().Set(reflect.ValueOf(unexportedRemoved))
+		return result.Interface()
+
+	case reflect.Array, reflect.Slice:
+		var result reflect.Value
+		if expectedKind == reflect.Array {
+			result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem()
+		} else {
+			result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
+		}
+		for i := 0; i < expectedValue.Len(); i++ {
+			index := expectedValue.Index(i)
+			if isNil(index) {
+				continue
+			}
+			unexportedRemoved := copyExportedFields(index.Interface())
+			result.Index(i).Set(reflect.ValueOf(unexportedRemoved))
+		}
+		return result.Interface()
+
+	case reflect.Map:
+		result := reflect.MakeMap(expectedType)
+		for _, k := range expectedValue.MapKeys() {
+			index := expectedValue.MapIndex(k)
+			unexportedRemoved := copyExportedFields(index.Interface())
+			result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved))
+		}
+		return result.Interface()
+
+	default:
+		return expected
+	}
+}
+
+// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are
+// considered equal. This comparison of only exported fields is applied recursively to nested data
+// structures.
+//
+// This function does no assertion of any kind.
+//
+// Deprecated: Use [EqualExportedValues] instead.
+func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
+	expectedCleaned := copyExportedFields(expected)
+	actualCleaned := copyExportedFields(actual)
+	return ObjectsAreEqualValues(expectedCleaned, actualCleaned)
+}
+
 // ObjectsAreEqualValues gets whether two objects are equal, or if their
 // values are equal.
 func ObjectsAreEqualValues(expected, actual interface{}) bool {
@@ -83,17 +166,40 @@
 		return true
 	}
 
-	actualType := reflect.TypeOf(actual)
-	if actualType == nil {
+	expectedValue := reflect.ValueOf(expected)
+	actualValue := reflect.ValueOf(actual)
+	if !expectedValue.IsValid() || !actualValue.IsValid() {
 		return false
 	}
-	expectedValue := reflect.ValueOf(expected)
-	if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
-		// Attempt comparison after type conversion
-		return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+
+	expectedType := expectedValue.Type()
+	actualType := actualValue.Type()
+	if !expectedType.ConvertibleTo(actualType) {
+		return false
 	}
 
-	return false
+	if !isNumericType(expectedType) || !isNumericType(actualType) {
+		// Attempt comparison after type conversion
+		return reflect.DeepEqual(
+			expectedValue.Convert(actualType).Interface(), actual,
+		)
+	}
+
+	// If BOTH values are numeric, there are chances of false positives due
+	// to overflow or underflow. So, we need to make sure to always convert
+	// the smaller type to a larger type before comparing.
+	if expectedType.Size() >= actualType.Size() {
+		return actualValue.Convert(expectedType).Interface() == expected
+	}
+
+	return expectedValue.Convert(actualType).Interface() == actual
+}
+
+// isNumericType returns true if the type is one of:
+// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
+// float32, float64, complex64, complex128
+func isNumericType(t reflect.Type) bool {
+	return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128
 }
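A sketch of what the numeric-widening logic above changes in practice (assumes testing and assert imports); the smaller numeric type is now converted to the larger one before comparison, so overflow no longer produces false equality:

	func TestEqualValuesWideningSketch(t *testing.T) {
		assert.EqualValues(t, uint32(123), int32(123)) // still passes: equal after conversion
		assert.NotEqualValues(t, int64(300), int8(44)) // passes: 300 no longer wraps to 44 during comparison
	}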
 
 /* CallerInfo is necessary because the assert functions use the testing object
@@ -104,60 +210,77 @@
 // of each stack frame leading from the current test to the assert call that
 // failed.
 func CallerInfo() []string {
-
 	var pc uintptr
-	var ok bool
 	var file string
 	var line int
 	var name string
 
+	const stackFrameBufferSize = 10
+	pcs := make([]uintptr, stackFrameBufferSize)
+
 	callers := []string{}
-	for i := 0; ; i++ {
-		pc, file, line, ok = runtime.Caller(i)
-		if !ok {
-			// The breaks below failed to terminate the loop, and we ran off the
-			// end of the call stack.
+	offset := 1
+
+	for {
+		n := runtime.Callers(offset, pcs)
+
+		if n == 0 {
 			break
 		}
 
-		// This is a huge edge case, but it will panic if this is the case, see #180
-		if file == "<autogenerated>" {
-			break
-		}
+		frames := runtime.CallersFrames(pcs[:n])
 
-		f := runtime.FuncForPC(pc)
-		if f == nil {
-			break
-		}
-		name = f.Name()
+		for {
+			frame, more := frames.Next()
+			pc = frame.PC
+			file = frame.File
+			line = frame.Line
 
-		// testing.tRunner is the standard library function that calls
-		// tests. Subtests are called directly by tRunner, without going through
-		// the Test/Benchmark/Example function that contains the t.Run calls, so
-		// with subtests we should break when we hit tRunner, without adding it
-		// to the list of callers.
-		if name == "testing.tRunner" {
-			break
-		}
+			// This is a huge edge case, but it will panic if this is the case, see #180
+			if file == "<autogenerated>" {
+				break
+			}
 
-		parts := strings.Split(file, "/")
-		file = parts[len(parts)-1]
-		if len(parts) > 1 {
-			dir := parts[len(parts)-2]
-			if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
-				path, _ := filepath.Abs(file)
-				callers = append(callers, fmt.Sprintf("%s:%d", path, line))
+			f := runtime.FuncForPC(pc)
+			if f == nil {
+				break
+			}
+			name = f.Name()
+
+			// testing.tRunner is the standard library function that calls
+			// tests. Subtests are called directly by tRunner, without going through
+			// the Test/Benchmark/Example function that contains the t.Run calls, so
+			// with subtests we should break when we hit tRunner, without adding it
+			// to the list of callers.
+			if name == "testing.tRunner" {
+				break
+			}
+
+			parts := strings.Split(file, "/")
+			if len(parts) > 1 {
+				filename := parts[len(parts)-1]
+				dir := parts[len(parts)-2]
+				if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
+					callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+				}
+			}
+
+			// Drop the package
+			dotPos := strings.LastIndexByte(name, '.')
+			name = name[dotPos+1:]
+			if isTest(name, "Test") ||
+				isTest(name, "Benchmark") ||
+				isTest(name, "Example") {
+				break
+			}
+
+			if !more {
+				break
 			}
 		}
 
-		// Drop the package
-		segments := strings.Split(name, ".")
-		name = segments[len(segments)-1]
-		if isTest(name, "Test") ||
-			isTest(name, "Benchmark") ||
-			isTest(name, "Example") {
-			break
-		}
+		// Next batch
+		offset += cap(pcs)
 	}
 
 	return callers
@@ -197,7 +320,7 @@
 
 // Aligns the provided message so that all lines after the first line start at the same location as the first line.
 // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
-// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
 // basis on which the alignment occurs).
 func indentMessageLines(message string, longestLabelLen int) string {
 	outBuf := new(bytes.Buffer)
@@ -273,7 +396,7 @@
 
 // labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
 //
-//   \t{{label}}:{{align_spaces}}\t{{content}}\n
+//	\t{{label}}:{{align_spaces}}\t{{content}}\n
 //
 // The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
 // If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
@@ -296,7 +419,7 @@
 
 // Implements asserts that an object implements the specified interface.
 //
-//    assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+//	assert.Implements(t, (*MyInterface)(nil), new(MyObject))
 func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -313,22 +436,58 @@
 	return true
 }
 
-// IsType asserts that the specified objects are of the same type.
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+// NotImplements asserts that an object does not implement the specified interface.
+//
+//	assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
+func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
 
-	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
-		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+	if object == nil {
+		return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...)
+	}
+	if reflect.TypeOf(object).Implements(interfaceType) {
+		return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...)
 	}
 
 	return true
 }
 
+func isType(expectedType, object interface{}) bool {
+	return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType))
+}
+
+// IsType asserts that the specified objects are of the same type.
+//
+//	assert.IsType(t, &MyStruct{}, &MyStruct{})
+func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool {
+	if isType(expectedType, object) {
+		return true
+	}
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...)
+}
+
+// IsNotType asserts that the specified objects are not of the same type.
+//
+//	assert.IsNotType(t, &NotMyStruct{}, &MyStruct{})
+func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool {
+	if !isType(theType, object) {
+		return true
+	}
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...)
+}
+
 // Equal asserts that two objects are equal.
 //
-//    assert.Equal(t, 123, 123)
+//	assert.Equal(t, 123, 123)
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses). Function equality
@@ -351,7 +510,6 @@
 	}
 
 	return true
-
 }
 
 // validateEqualArgs checks whether provided arguments can be safely used in the
@@ -369,7 +527,7 @@
 
 // Same asserts that two pointers reference the same object.
 //
-//    assert.Same(t, ptr1, ptr2)
+//	assert.Same(t, ptr1, ptr2)
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -378,10 +536,17 @@
 		h.Helper()
 	}
 
-	if !samePointers(expected, actual) {
+	same, ok := samePointers(expected, actual)
+	if !ok {
+		return Fail(t, "Both arguments must be pointers", msgAndArgs...)
+	}
+
+	if !same {
+		// both are pointers, but they are not the same type pointing to the same address
 		return Fail(t, fmt.Sprintf("Not same: \n"+
-			"expected: %p %#v\n"+
-			"actual  : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+			"expected: %p %#[1]v\n"+
+			"actual  : %p %#[2]v",
+			expected, actual), msgAndArgs...)
 	}
 
 	return true
@@ -389,7 +554,7 @@
 
 // NotSame asserts that two pointers do not reference the same object.
 //
-//    assert.NotSame(t, ptr1, ptr2)
+//	assert.NotSame(t, ptr1, ptr2)
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -398,36 +563,44 @@
 		h.Helper()
 	}
 
-	if samePointers(expected, actual) {
+	same, ok := samePointers(expected, actual)
+	if !ok {
+		// fails when the arguments are not pointers
+		return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
+	}
+
+	if same {
 		return Fail(t, fmt.Sprintf(
-			"Expected and actual point to the same object: %p %#v",
-			expected, expected), msgAndArgs...)
+			"Expected and actual point to the same object: %p %#[1]v",
+			expected), msgAndArgs...)
 	}
 	return true
 }
 
-// samePointers compares two generic interface objects and returns whether
-// they point to the same object
-func samePointers(first, second interface{}) bool {
+// samePointers checks if two generic interface objects are pointers of the same
+// type pointing to the same object. It returns two values: same indicating if
+// they are the same type and point to the same object, and ok indicating that
+// both inputs are pointers.
+func samePointers(first, second interface{}) (same bool, ok bool) {
 	firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
 	if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
-		return false
+		return false, false // not both are pointers
 	}
 
 	firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
 	if firstType != secondType {
-		return false
+		return false, true // both are pointers, but of different types
 	}
 
 	// compare pointer addresses
-	return first == second
+	return first == second, true
 }
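A sketch of the resulting Same/NotSame behavior (assumes testing and assert imports); with the (same, ok) split, non-pointer arguments now report a failure instead of silently passing:

	func TestSamePointersSketch(t *testing.T) {
		x := 1
		assert.Same(t, &x, &x) // passes: identical pointer values of the same type
		// assert.NotSame(t, 1, 1) // now reports "Both arguments must be pointers" instead of silently passing
	}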
 
 // formatUnequalValues takes two values of arbitrary types and returns string
 // representations appropriate to be presented to the user.
 //
 // If the values are not of like type, the returned strings will be prefixed
-// with the type name, and the value will be enclosed in parenthesis similar
+// with the type name, and the value will be enclosed in parentheses similar
 // to a type conversion in the Go grammar.
 func formatUnequalValues(expected, actual interface{}) (e string, a string) {
 	if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
@@ -454,10 +627,10 @@
 	return value
 }
 
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
 //
-//    assert.EqualValues(t, uint32(123), int32(123))
+//	assert.EqualValues(t, uint32(123), int32(123))
 func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -472,12 +645,47 @@
 	}
 
 	return true
+}
 
+// EqualExportedValues asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+//	 type S struct {
+//		Exported     	int
+//		notExported   	int
+//	 }
+//	 assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true
+//	 assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false
+func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	aType := reflect.TypeOf(expected)
+	bType := reflect.TypeOf(actual)
+
+	if aType != bType {
+		return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+	}
+
+	expected = copyExportedFields(expected)
+	actual = copyExportedFields(actual)
+
+	if !ObjectsAreEqualValues(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+
+			"expected: %s\n"+
+			"actual  : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
 }
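An illustrative use of EqualExportedValues (assumes testing and assert imports; S is a hypothetical example type):

	type S struct {
		Exported   int
		unexported int
	}

	func TestEqualExportedValuesSketch(t *testing.T) {
		assert.EqualExportedValues(t, S{Exported: 1, unexported: 2}, S{Exported: 1, unexported: 3}) // passes
		// assert.EqualExportedValues(t, S{Exported: 1}, S{Exported: 2}) // would fail: an exported field differs
	}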
 
 // Exactly asserts that two objects are equal in value and type.
 //
-//    assert.Exactly(t, int32(123), int64(123))
+//	assert.Exactly(t, int32(123), int64(123))
 func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -491,12 +699,11 @@
 	}
 
 	return Equal(t, expected, actual, msgAndArgs...)
-
 }
 
 // NotNil asserts that the specified object is not nil.
 //
-//    assert.NotNil(t, err)
+//	assert.NotNil(t, err)
 func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	if !isNil(object) {
 		return true
@@ -507,17 +714,6 @@
 	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
 }
 
-// containsKind checks if a specified kind in the slice of kinds.
-func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
-	for i := 0; i < len(kinds); i++ {
-		if kind == kinds[i] {
-			return true
-		}
-	}
-
-	return false
-}
-
 // isNil checks if a specified object is nil or not, without Failing.
 func isNil(object interface{}) bool {
 	if object == nil {
@@ -525,16 +721,13 @@
 	}
 
 	value := reflect.ValueOf(object)
-	kind := value.Kind()
-	isNilableKind := containsKind(
-		[]reflect.Kind{
-			reflect.Chan, reflect.Func,
-			reflect.Interface, reflect.Map,
-			reflect.Ptr, reflect.Slice},
-		kind)
+	switch value.Kind() {
+	case
+		reflect.Chan, reflect.Func,
+		reflect.Interface, reflect.Map,
+		reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
 
-	if isNilableKind && value.IsNil() {
-		return true
+		return value.IsNil()
 	}
 
 	return false
@@ -542,7 +735,7 @@
 
 // Nil asserts that the specified object is nil.
 //
-//    assert.Nil(t, err)
+//	assert.Nil(t, err)
 func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	if isNil(object) {
 		return true
@@ -555,37 +748,45 @@
 
 // isEmpty gets whether the specified object is considered empty or not.
 func isEmpty(object interface{}) bool {
-
 	// get nil case out of the way
 	if object == nil {
 		return true
 	}
 
-	objValue := reflect.ValueOf(object)
-
-	switch objValue.Kind() {
-	// collection types are empty when they have no element
-	case reflect.Chan, reflect.Map, reflect.Slice:
-		return objValue.Len() == 0
-	// pointers are empty if nil or if the value they point to is empty
-	case reflect.Ptr:
-		if objValue.IsNil() {
-			return true
-		}
-		deref := objValue.Elem().Interface()
-		return isEmpty(deref)
-	// for all other types, compare against the zero value
-	// array types are empty when they match their zero-initialized state
-	default:
-		zero := reflect.Zero(objValue.Type())
-		return reflect.DeepEqual(object, zero.Interface())
-	}
+	return isEmptyValue(reflect.ValueOf(object))
 }
 
-// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// isEmptyValue gets whether the specified reflect.Value is considered empty or not.
+func isEmptyValue(objValue reflect.Value) bool {
+	if objValue.IsZero() {
+		return true
+	}
+	// Special cases of non-zero values that we consider empty
+	switch objValue.Kind() {
+	// collection types are empty when they have no element
+	// Note: array types are empty when they match their zero-initialized state.
+	case reflect.Chan, reflect.Map, reflect.Slice:
+		return objValue.Len() == 0
+	// non-nil pointers are empty if the value they point to is empty
+	case reflect.Ptr:
+		return isEmptyValue(objValue.Elem())
+	}
+	return false
+}
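A short sketch of the "empty" rules implemented above (assumes testing and assert imports):

	func TestEmptyRulesSketch(t *testing.T) {
		assert.Empty(t, "")           // zero value
		assert.Empty(t, []int{})      // zero-length slice
		assert.Empty(t, [2]int{0, 0}) // array whose elements are all zero values
		zero := 0
		assert.Empty(t, &zero)        // non-nil pointer to an "empty" value
		assert.NotEmpty(t, []int{0})  // a slice with one element is not empty, even if the element is zero
	}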
+
+// Empty asserts that the given value is "empty".
 //
-//  assert.Empty(t, obj)
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (a stricter rule than the zero-length rule used for slices).
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+//	assert.Empty(t, obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
 func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	pass := isEmpty(object)
 	if !pass {
@@ -596,15 +797,13 @@
 	}
 
 	return pass
-
 }
 
-// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
 //
-//  if assert.NotEmpty(t, obj) {
-//    assert.Equal(t, "two", obj[1])
-//  }
+//	if assert.NotEmpty(t, obj) {
+//	  assert.Equal(t, "two", obj[1])
+//	}
 func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	pass := !isEmpty(object)
 	if !pass {
@@ -615,43 +814,40 @@
 	}
 
 	return pass
-
 }
 
-// getLen try to get length of object.
-// return (false, 0) if impossible.
-func getLen(x interface{}) (ok bool, length int) {
+// getLen tries to get the length of an object.
+// It returns (0, false) if impossible.
+func getLen(x interface{}) (length int, ok bool) {
 	v := reflect.ValueOf(x)
 	defer func() {
-		if e := recover(); e != nil {
-			ok = false
-		}
+		ok = recover() == nil
 	}()
-	return true, v.Len()
+	return v.Len(), true
 }
 
 // Len asserts that the specified object has a specific length.
 // Len also fails if the object has a type that len() does not accept.
 //
-//    assert.Len(t, mySlice, 3)
+//	assert.Len(t, mySlice, 3)
 func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	ok, l := getLen(object)
+	l, ok := getLen(object)
 	if !ok {
-		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...)
 	}
 
 	if l != length {
-		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
 	}
 	return true
 }
 
 // True asserts that the specified value is true.
 //
-//    assert.True(t, myBool)
+//	assert.True(t, myBool)
 func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
 	if !value {
 		if h, ok := t.(tHelper); ok {
@@ -661,12 +857,11 @@
 	}
 
 	return true
-
 }
 
 // False asserts that the specified value is false.
 //
-//    assert.False(t, myBool)
+//	assert.False(t, myBool)
 func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
 	if value {
 		if h, ok := t.(tHelper); ok {
@@ -676,12 +871,11 @@
 	}
 
 	return true
-
 }
 
 // NotEqual asserts that the specified values are NOT equal.
 //
-//    assert.NotEqual(t, obj1, obj2)
+//	assert.NotEqual(t, obj1, obj2)
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses).
@@ -699,12 +893,11 @@
 	}
 
 	return true
-
 }
 
 // NotEqualValues asserts that two objects are not equal even when converted to the same type
 //
-//    assert.NotEqualValues(t, obj1, obj2)
+//	assert.NotEqualValues(t, obj1, obj2)
 func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -722,7 +915,6 @@
 // return (true, false) if element was not found.
 // return (true, true) if element was found.
 func containsElement(list interface{}, element interface{}) (ok, found bool) {
-
 	listValue := reflect.ValueOf(list)
 	listType := reflect.TypeOf(list)
 	if listType == nil {
@@ -757,15 +949,14 @@
 		}
 	}
 	return true, false
-
 }
 
 // Contains asserts that the specified string, list(array, slice...) or map contains the
 // specified substring or element.
 //
-//    assert.Contains(t, "Hello World", "World")
-//    assert.Contains(t, ["Hello", "World"], "World")
-//    assert.Contains(t, {"Hello": "World"}, "Hello")
+//	assert.Contains(t, "Hello World", "World")
+//	assert.Contains(t, ["Hello", "World"], "World")
+//	assert.Contains(t, {"Hello": "World"}, "Hello")
 func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -780,15 +971,14 @@
 	}
 
 	return true
-
 }
 
 // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
-//    assert.NotContains(t, "Hello World", "Earth")
-//    assert.NotContains(t, ["Hello", "World"], "Earth")
-//    assert.NotContains(t, {"Hello": "World"}, "Earth")
+//	assert.NotContains(t, "Hello World", "Earth")
+//	assert.NotContains(t, ["Hello", "World"], "Earth")
+//	assert.NotContains(t, {"Hello": "World"}, "Earth")
 func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -796,20 +986,24 @@
 
 	ok, found := containsElement(s, contains)
 	if !ok {
-		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
 	}
 	if found {
-		return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...)
 	}
 
 	return true
-
 }
 
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//	assert.Subset(t, [1, 2, 3], [1, 2])
+//	assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
+//	assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
+//	assert.Subset(t, {"x": 1, "y": 2}, ["x"])
 func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -818,59 +1012,66 @@
 		return true // we consider nil to be equal to the nil set
 	}
 
-	defer func() {
-		if e := recover(); e != nil {
-			ok = false
-		}
-	}()
-
 	listKind := reflect.TypeOf(list).Kind()
-	subsetKind := reflect.TypeOf(subset).Kind()
-
 	if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
 	}
 
-	if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+	subsetKind := reflect.TypeOf(subset).Kind()
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
 	}
 
-	subsetValue := reflect.ValueOf(subset)
 	if subsetKind == reflect.Map && listKind == reflect.Map {
-		listValue := reflect.ValueOf(list)
-		subsetKeys := subsetValue.MapKeys()
+		subsetMap := reflect.ValueOf(subset)
+		actualMap := reflect.ValueOf(list)
 
-		for i := 0; i < len(subsetKeys); i++ {
-			subsetKey := subsetKeys[i]
-			subsetElement := subsetValue.MapIndex(subsetKey).Interface()
-			listElement := listValue.MapIndex(subsetKey).Interface()
+		for _, k := range subsetMap.MapKeys() {
+			ev := subsetMap.MapIndex(k)
+			av := actualMap.MapIndex(k)
 
-			if !ObjectsAreEqual(subsetElement, listElement) {
-				return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+			if !av.IsValid() {
+				return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
+			}
+			if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
+				return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
 			}
 		}
 
 		return true
 	}
 
-	for i := 0; i < subsetValue.Len(); i++ {
-		element := subsetValue.Index(i).Interface()
+	subsetList := reflect.ValueOf(subset)
+	if subsetKind == reflect.Map {
+		keys := make([]interface{}, subsetList.Len())
+		for idx, key := range subsetList.MapKeys() {
+			keys[idx] = key.Interface()
+		}
+		subsetList = reflect.ValueOf(keys)
+	}
+	for i := 0; i < subsetList.Len(); i++ {
+		element := subsetList.Index(i).Interface()
 		ok, found := containsElement(list, element)
 		if !ok {
-			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+			return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
 		}
 		if !found {
-			return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+			return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
 		}
 	}
 
 	return true
 }
 
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//	assert.NotSubset(t, [1, 3, 4], [1, 2])
+//	assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
+//	assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
+//	assert.NotSubset(t, {"x": 1, "y": 2}, ["z"])
 func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -879,34 +1080,28 @@
 		return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
 	}
 
-	defer func() {
-		if e := recover(); e != nil {
-			ok = false
-		}
-	}()
-
 	listKind := reflect.TypeOf(list).Kind()
-	subsetKind := reflect.TypeOf(subset).Kind()
-
 	if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
 	}
 
-	if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+	subsetKind := reflect.TypeOf(subset).Kind()
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
 	}
 
-	subsetValue := reflect.ValueOf(subset)
 	if subsetKind == reflect.Map && listKind == reflect.Map {
-		listValue := reflect.ValueOf(list)
-		subsetKeys := subsetValue.MapKeys()
+		subsetMap := reflect.ValueOf(subset)
+		actualMap := reflect.ValueOf(list)
 
-		for i := 0; i < len(subsetKeys); i++ {
-			subsetKey := subsetKeys[i]
-			subsetElement := subsetValue.MapIndex(subsetKey).Interface()
-			listElement := listValue.MapIndex(subsetKey).Interface()
+		for _, k := range subsetMap.MapKeys() {
+			ev := subsetMap.MapIndex(k)
+			av := actualMap.MapIndex(k)
 
-			if !ObjectsAreEqual(subsetElement, listElement) {
+			if !av.IsValid() {
+				return true
+			}
+			if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
 				return true
 			}
 		}
@@ -914,11 +1109,19 @@
 		return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
 	}
 
-	for i := 0; i < subsetValue.Len(); i++ {
-		element := subsetValue.Index(i).Interface()
+	subsetList := reflect.ValueOf(subset)
+	if subsetKind == reflect.Map {
+		keys := make([]interface{}, subsetList.Len())
+		for idx, key := range subsetList.MapKeys() {
+			keys[idx] = key.Interface()
+		}
+		subsetList = reflect.ValueOf(keys)
+	}
+	for i := 0; i < subsetList.Len(); i++ {
+		element := subsetList.Index(i).Interface()
 		ok, found := containsElement(list, element)
 		if !ok {
-			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+			return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...)
 		}
 		if !found {
 			return true
@@ -1024,6 +1227,39 @@
 	return msg.String()
 }
 
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true
+func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if isEmpty(listA) && isEmpty(listB) {
+		return Fail(t, "listA and listB contain the same elements", msgAndArgs)
+	}
+
+	if !isList(t, listA, msgAndArgs...) {
+		return Fail(t, "listA is not a list type", msgAndArgs...)
+	}
+	if !isList(t, listB, msgAndArgs...) {
+		return Fail(t, "listB is not a list type", msgAndArgs...)
+	}
+
+	extraA, extraB := diffLists(listA, listB)
+	if len(extraA) == 0 && len(extraB) == 0 {
+		return Fail(t, "listA and listB contain the same elements", msgAndArgs)
+	}
+
+	return true
+}
+
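A short usage sketch of the new NotElementsMatch (hypothetical, assuming the standard assert import); it passes only when the two lists differ as multisets, so ordering never matters:

	assert.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3}) // passes: duplicate counts differ
	assert.NotElementsMatch(t, []int{1, 2, 3}, []int{3, 2, 1})    // fails: same elements, order ignored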
 // Condition uses a Comparison to assert a complex condition.
 func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -1060,7 +1296,7 @@
 
 // Panics asserts that the code inside the specified PanicTestFunc panics.
 //
-//   assert.Panics(t, func(){ GoCrazy() })
+//	assert.Panics(t, func(){ GoCrazy() })
 func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1076,7 +1312,7 @@
 // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
-//   assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+//	assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
 func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1097,7 +1333,7 @@
 // panics, and that the recovered panic value is an error that satisfies the
 // EqualError comparison.
 //
-//   assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+//	assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
 func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1117,7 +1353,7 @@
 
 // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
 //
-//   assert.NotPanics(t, func(){ RemainCalm() })
+//	assert.NotPanics(t, func(){ RemainCalm() })
 func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1132,7 +1368,7 @@
 
 // WithinDuration asserts that the two times are within duration delta of each other.
 //
-//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+//	assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
 func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1148,7 +1384,7 @@
 
 // WithinRange asserts that a time is within a time range (inclusive).
 //
-//   assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+//	assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
 func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1207,7 +1443,7 @@
 
 // InDelta asserts that the two numerals are within delta of each other.
 //
-// 	 assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+//	assert.InDelta(t, math.Pi, 22/7.0, 0.01)
 func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1336,12 +1572,15 @@
 		h.Helper()
 	}
 	if math.IsNaN(epsilon) {
-		return Fail(t, "epsilon must not be NaN")
+		return Fail(t, "epsilon must not be NaN", msgAndArgs...)
 	}
 	actualEpsilon, err := calcRelativeError(expected, actual)
 	if err != nil {
 		return Fail(t, err.Error(), msgAndArgs...)
 	}
+	if math.IsNaN(actualEpsilon) {
+		return Fail(t, "relative error is NaN", msgAndArgs...)
+	}
 	if actualEpsilon > epsilon {
 		return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
 			"        < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
@@ -1355,19 +1594,26 @@
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	if expected == nil || actual == nil ||
-		reflect.TypeOf(actual).Kind() != reflect.Slice ||
-		reflect.TypeOf(expected).Kind() != reflect.Slice {
+
+	if expected == nil || actual == nil {
 		return Fail(t, "Parameters must be slice", msgAndArgs...)
 	}
 
-	actualSlice := reflect.ValueOf(actual)
 	expectedSlice := reflect.ValueOf(expected)
+	actualSlice := reflect.ValueOf(actual)
 
-	for i := 0; i < actualSlice.Len(); i++ {
-		result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
-		if !result {
-			return result
+	if expectedSlice.Type().Kind() != reflect.Slice {
+		return Fail(t, "Expected value must be slice", msgAndArgs...)
+	}
+
+	expectedLen := expectedSlice.Len()
+	if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) {
+		return false
+	}
+
+	for i := 0; i < expectedLen; i++ {
+		if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) {
+			return false
 		}
 	}
 
@@ -1380,10 +1626,10 @@
 
 // NoError asserts that a function returned no error (i.e. `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if assert.NoError(t, err) {
-//	   assert.Equal(t, expectedObj, actualObj)
-//   }
+//	  actualObj, err := SomeFunction()
+//	  if assert.NoError(t, err) {
+//		   assert.Equal(t, expectedObj, actualObj)
+//	  }
 func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
 	if err != nil {
 		if h, ok := t.(tHelper); ok {
@@ -1397,10 +1643,8 @@
 
 // Error asserts that a function returned an error (i.e. not `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if assert.Error(t, err) {
-//	   assert.Equal(t, expectedError, err)
-//   }
+//	actualObj, err := SomeFunction()
+//	assert.Error(t, err)
 func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
 	if err == nil {
 		if h, ok := t.(tHelper); ok {
@@ -1415,8 +1659,8 @@
 // EqualError asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
-//   actualObj, err := SomeFunction()
-//   assert.EqualError(t, err,  expectedErrorString)
+//	actualObj, err := SomeFunction()
+//	assert.EqualError(t, err,  expectedErrorString)
 func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1438,8 +1682,8 @@
 // ErrorContains asserts that a function returned an error (i.e. not `nil`)
 // and that the error contains the specified substring.
 //
-//   actualObj, err := SomeFunction()
-//   assert.ErrorContains(t, err,  expectedErrorSubString)
+//	actualObj, err := SomeFunction()
+//	assert.ErrorContains(t, err,  expectedErrorSubString)
 func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1458,7 +1702,6 @@
 
 // matchRegexp returns true if a specified regexp matches a string.
 func matchRegexp(rx interface{}, str interface{}) bool {
-
 	var r *regexp.Regexp
 	if rr, ok := rx.(*regexp.Regexp); ok {
 		r = rr
@@ -1466,14 +1709,20 @@
 		r = regexp.MustCompile(fmt.Sprint(rx))
 	}
 
-	return (r.FindStringIndex(fmt.Sprint(str)) != nil)
-
+	switch v := str.(type) {
+	case []byte:
+		return r.Match(v)
+	case string:
+		return r.MatchString(v)
+	default:
+		return r.MatchString(fmt.Sprint(v))
+	}
 }
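With the switch above, matchRegexp runs the pattern directly against string and []byte inputs instead of first formatting them with fmt.Sprint, so byte slices now behave like strings (an illustrative sketch, not part of this change):

	assert.Regexp(t, "^start", []byte("starting"))                   // matches the raw bytes
	assert.Regexp(t, regexp.MustCompile(`[0-9]+$`), []byte("v1.23")) // compiled patterns work the same way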
 
 // Regexp asserts that a specified regexp matches a string.
 //
-//  assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
-//  assert.Regexp(t, "start...$", "it's not starting")
+//	assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+//	assert.Regexp(t, "start...$", "it's not starting")
 func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1490,8 +1739,8 @@
 
 // NotRegexp asserts that a specified regexp does not match a string.
 //
-//  assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
-//  assert.NotRegexp(t, "^start", "it's not starting")
+//	assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+//	assert.NotRegexp(t, "^start", "it's not starting")
 func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1503,7 +1752,6 @@
 	}
 
 	return !match
-
 }
 
 // Zero asserts that i is the zero value for its type.
@@ -1603,7 +1851,7 @@
 
 // JSONEq asserts that two JSON strings are equivalent.
 //
-//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//	assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
 func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1614,6 +1862,11 @@
 		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
 	}
 
+	// Shortcut if same bytes
+	if actual == expected {
+		return true
+	}
+
 	if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
 		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
 	}
@@ -1632,6 +1885,11 @@
 		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
 	}
 
+	// Shortcut if same bytes
+	if actual == expected {
+		return true
+	}
+
 	if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
 		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
 	}
@@ -1719,20 +1977,21 @@
 	MaxDepth:                10,
 }
 
-type tHelper interface {
+type tHelper = interface {
 	Helper()
 }
 
 // Eventually asserts that the given condition will be met within waitFor time,
 // periodically checking the target function each tick.
 //
-//    assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+//	assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
 func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
 
 	ch := make(chan bool, 1)
+	checkCond := func() { ch <- condition() }
 
 	timer := time.NewTimer(waitFor)
 	defer timer.Stop()
@@ -1740,18 +1999,131 @@
 	ticker := time.NewTicker(tick)
 	defer ticker.Stop()
 
-	for tick := ticker.C; ; {
+	var tickC <-chan time.Time
+
+	// Check the condition once first on the initial call.
+	go checkCond()
+
+	for {
 		select {
 		case <-timer.C:
 			return Fail(t, "Condition never satisfied", msgAndArgs...)
-		case <-tick:
-			tick = nil
-			go func() { ch <- condition() }()
+		case <-tickC:
+			tickC = nil
+			go checkCond()
 		case v := <-ch:
 			if v {
 				return true
 			}
-			tick = ticker.C
+			tickC = ticker.C
+		}
+	}
+}
+
+// CollectT implements the TestingT interface and collects all errors.
+type CollectT struct {
+	// A slice of errors. Non-nil slice denotes a failure.
+	// If it's non-nil but len(c.errors) == 0, this is also a failure
+	// obtained by direct c.FailNow() call.
+	errors []error
+}
+
+// Helper is like [testing.T.Helper] but does nothing.
+func (CollectT) Helper() {}
+
+// Errorf collects the error.
+func (c *CollectT) Errorf(format string, args ...interface{}) {
+	c.errors = append(c.errors, fmt.Errorf(format, args...))
+}
+
+// FailNow stops execution by calling runtime.Goexit.
+func (c *CollectT) FailNow() {
+	c.fail()
+	runtime.Goexit()
+}
+
+// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
+func (*CollectT) Reset() {
+	panic("Reset() is deprecated")
+}
+
+// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
+func (*CollectT) Copy(TestingT) {
+	panic("Copy() is deprecated")
+}
+
+func (c *CollectT) fail() {
+	if !c.failed() {
+		c.errors = []error{} // Make it non-nil to mark a failure.
+	}
+}
+
+func (c *CollectT) failed() bool {
+	return c.errors != nil
+}
+
+// EventuallyWithT asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+//	externalValue := false
+//	go func() {
+//		time.Sleep(8*time.Second)
+//		externalValue = true
+//	}()
+//	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+//		// add assertions as needed; any assertion failure will fail the current tick
+//		assert.True(c, externalValue, "expected 'externalValue' to be true")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	var lastFinishedTickErrs []error
+	ch := make(chan *CollectT, 1)
+
+	checkCond := func() {
+		collect := new(CollectT)
+		defer func() {
+			ch <- collect
+		}()
+		condition(collect)
+	}
+
+	timer := time.NewTimer(waitFor)
+	defer timer.Stop()
+
+	ticker := time.NewTicker(tick)
+	defer ticker.Stop()
+
+	var tickC <-chan time.Time
+
+	// Check the condition once first on the initial call.
+	go checkCond()
+
+	for {
+		select {
+		case <-timer.C:
+			for _, err := range lastFinishedTickErrs {
+				t.Errorf("%v", err)
+			}
+			return Fail(t, "Condition never satisfied", msgAndArgs...)
+		case <-tickC:
+			tickC = nil
+			go checkCond()
+		case collect := <-ch:
+			if !collect.failed() {
+				return true
+			}
+			// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
+			lastFinishedTickErrs = collect.errors
+			tickC = ticker.C
 		}
 	}
 }
@@ -1759,13 +2131,14 @@
 // Never asserts that the given condition is not satisfied within waitFor time,
 // periodically checking the target function each tick.
 //
-//    assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+//	assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
 func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
 
 	ch := make(chan bool, 1)
+	checkCond := func() { ch <- condition() }
 
 	timer := time.NewTimer(waitFor)
 	defer timer.Stop()
@@ -1773,18 +2146,23 @@
 	ticker := time.NewTicker(tick)
 	defer ticker.Stop()
 
-	for tick := ticker.C; ; {
+	var tickC <-chan time.Time
+
+	// Check the condition once first on the initial call.
+	go checkCond()
+
+	for {
 		select {
 		case <-timer.C:
 			return true
-		case <-tick:
-			tick = nil
-			go func() { ch <- condition() }()
+		case <-tickC:
+			tickC = nil
+			go checkCond()
 		case v := <-ch:
 			if v {
 				return Fail(t, "Condition satisfied", msgAndArgs...)
 			}
-			tick = ticker.C
+			tickC = ticker.C
 		}
 	}
 }
@@ -1802,9 +2180,12 @@
 	var expectedText string
 	if target != nil {
 		expectedText = target.Error()
+		if err == nil {
+			return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...)
+		}
 	}
 
-	chain := buildErrorChainString(err)
+	chain := buildErrorChainString(err, false)
 
 	return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
 		"expected: %q\n"+
@@ -1812,7 +2193,7 @@
 	), msgAndArgs...)
 }
 
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorIs asserts that none of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -1827,7 +2208,7 @@
 		expectedText = target.Error()
 	}
 
-	chain := buildErrorChainString(err)
+	chain := buildErrorChainString(err, false)
 
 	return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
 		"found: %q\n"+
@@ -1845,24 +2226,70 @@
 		return true
 	}
 
-	chain := buildErrorChainString(err)
+	expectedType := reflect.TypeOf(target).Elem().String()
+	if err == nil {
+		return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+
+			"expected: %s", expectedType), msgAndArgs...)
+	}
+
+	chain := buildErrorChainString(err, true)
 
 	return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
-		"expected: %q\n"+
-		"in chain: %s", target, chain,
+		"expected: %s\n"+
+		"in chain: %s", expectedType, chain,
 	), msgAndArgs...)
 }
 
-func buildErrorChainString(err error) string {
+// NotErrorAs asserts that none of the errors in err's chain matches target.
+// If a match is found, errors.As still sets target to that error value and the assertion fails.
+func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if !errors.As(err, target) {
+		return true
+	}
+
+	chain := buildErrorChainString(err, true)
+
+	return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+		"found: %s\n"+
+		"in chain: %s", reflect.TypeOf(target).Elem().String(), chain,
+	), msgAndArgs...)
+}
+
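A short usage sketch of the new NotErrorAs (hypothetical, assuming the "errors", "io/fs", and "os" imports); the assertion passes when nothing in the chain can be assigned to the target:

	var pathErr *fs.PathError
	assert.NotErrorAs(t, errors.New("boom"), &pathErr) // passes: no *fs.PathError in the chain
	_, err := os.Open("does-not-exist.txt")
	assert.NotErrorAs(t, err, &pathErr) // fails, and errors.As has set pathErr to the wrapped *fs.PathError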
+func unwrapAll(err error) (errs []error) {
+	errs = append(errs, err)
+	switch x := err.(type) {
+	case interface{ Unwrap() error }:
+		err = x.Unwrap()
+		if err == nil {
+			return
+		}
+		errs = append(errs, unwrapAll(err)...)
+	case interface{ Unwrap() []error }:
+		for _, err := range x.Unwrap() {
+			errs = append(errs, unwrapAll(err)...)
+		}
+	}
+	return
+}
+
+func buildErrorChainString(err error, withType bool) string {
 	if err == nil {
 		return ""
 	}
 
-	e := errors.Unwrap(err)
-	chain := fmt.Sprintf("%q", err.Error())
-	for e != nil {
-		chain += fmt.Sprintf("\n\t%q", e.Error())
-		e = errors.Unwrap(e)
+	var chain string
+	errs := unwrapAll(err)
+	for i := range errs {
+		if i != 0 {
+			chain += "\n\t"
+		}
+		chain += fmt.Sprintf("%q", errs[i].Error())
+		if withType {
+			chain += fmt.Sprintf(" (%T)", errs[i])
+		}
 	}
 	return chain
 }
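For orientation, unwrapAll walks both single-error wrapping (Unwrap() error, e.g. fmt.Errorf with %w) and multi-error wrapping (Unwrap() []error, e.g. errors.Join), so the chain string now lists every wrapped error. A tiny sketch of the traversal:

	err := fmt.Errorf("outer: %w", errors.Join(errors.New("a"), errors.New("b")))
	// unwrapAll(err) yields the outer error, the joined error, then "a" and "b";
	// buildErrorChainString quotes each one, optionally suffixed with its dynamic type.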
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
index c9dccc4..a0b953a 100644
--- a/vendor/github.com/stretchr/testify/assert/doc.go
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -1,39 +1,44 @@
 // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
 //
-// Example Usage
+// # Note
+//
+// All functions in this package return a bool value indicating whether the assertion has passed.
+//
+// # Example Usage
 //
 // The following is a complete example using assert in a standard test function:
-//    import (
-//      "testing"
-//      "github.com/stretchr/testify/assert"
-//    )
 //
-//    func TestSomething(t *testing.T) {
+//	import (
+//	  "testing"
+//	  "github.com/stretchr/testify/assert"
+//	)
 //
-//      var a string = "Hello"
-//      var b string = "Hello"
+//	func TestSomething(t *testing.T) {
 //
-//      assert.Equal(t, a, b, "The two words should be the same.")
+//	  var a string = "Hello"
+//	  var b string = "Hello"
 //
-//    }
+//	  assert.Equal(t, a, b, "The two words should be the same.")
+//
+//	}
 //
 // if you assert many times, use the format below:
 //
-//    import (
-//      "testing"
-//      "github.com/stretchr/testify/assert"
-//    )
+//	import (
+//	  "testing"
+//	  "github.com/stretchr/testify/assert"
+//	)
 //
-//    func TestSomething(t *testing.T) {
-//      assert := assert.New(t)
+//	func TestSomething(t *testing.T) {
+//	  assert := assert.New(t)
 //
-//      var a string = "Hello"
-//      var b string = "Hello"
+//	  var a string = "Hello"
+//	  var b string = "Hello"
 //
-//      assert.Equal(a, b, "The two words should be the same.")
-//    }
+//	  assert.Equal(a, b, "The two words should be the same.")
+//	}
 //
-// Assertions
+// # Assertions
 //
 // Assertions allow you to easily write test code, and are global funcs in the `assert` package.
 // All assertion functions take, as the first argument, the `*testing.T` object provided by the
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index 4ed341d..5a6bb75 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -12,7 +12,7 @@
 // an error if building a new request fails.
 func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
 	w := httptest.NewRecorder()
-	req, err := http.NewRequest(method, url, nil)
+	req, err := http.NewRequest(method, url, http.NoBody)
 	if err != nil {
 		return -1, err
 	}
@@ -23,7 +23,7 @@
 
 // HTTPSuccess asserts that a specified handler returns a success status code.
 //
-//  assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//	assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -32,12 +32,12 @@
 	}
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
 	}
 
 	isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
 	if !isSuccessCode {
-		Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+		Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
 	}
 
 	return isSuccessCode
@@ -45,7 +45,7 @@
 
 // HTTPRedirect asserts that a specified handler returns a redirect status code.
 //
-//  assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -54,12 +54,12 @@
 	}
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
 	}
 
 	isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
 	if !isRedirectCode {
-		Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+		Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
 	}
 
 	return isRedirectCode
@@ -67,7 +67,7 @@
 
 // HTTPError asserts that a specified handler returns an error status code.
 //
-//  assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -76,12 +76,12 @@
 	}
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
 	}
 
 	isErrorCode := code >= http.StatusBadRequest
 	if !isErrorCode {
-		Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+		Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
 	}
 
 	return isErrorCode
@@ -89,7 +89,7 @@
 
 // HTTPStatusCode asserts that a specified handler returns a specified status code.
 //
-//  assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//	assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
@@ -98,12 +98,12 @@
 	}
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
 	}
 
 	successful := code == statuscode
 	if !successful {
-		Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+		Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...)
 	}
 
 	return successful
@@ -113,7 +113,10 @@
 // empty string if building a new request fails.
 func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
 	w := httptest.NewRecorder()
-	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+	if len(values) > 0 {
+		url += "?" + values.Encode()
+	}
+	req, err := http.NewRequest(method, url, http.NoBody)
 	if err != nil {
 		return ""
 	}
@@ -124,7 +127,7 @@
 // HTTPBodyContains asserts that a specified handler returns a
 // body that contains a string.
 //
-//  assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//	assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -135,7 +138,7 @@
 
 	contains := strings.Contains(body, fmt.Sprint(str))
 	if !contains {
-		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+		Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
 	}
 
 	return contains
@@ -144,7 +147,7 @@
 // HTTPBodyNotContains asserts that a specified handler returns a
 // body that does not contain a string.
 //
-//  assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//	assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -155,7 +158,7 @@
 
 	contains := strings.Contains(body, fmt.Sprint(str))
 	if contains {
-		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+		Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
 	}
 
 	return !contains
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
new file mode 100644
index 0000000..5a74c4f
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
@@ -0,0 +1,24 @@
+//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that calls a pluggable implementation.
+//
+// This implementation is selected with the testify_yaml_custom build tag.
+//
+//	go test -tags testify_yaml_custom
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3].
+//
+// In your test package:
+//
+//		import assertYaml "github.com/stretchr/testify/assert/yaml"
+//
+//		func init() {
+//			assertYaml.Unmarshal = func (in []byte, out interface{}) error {
+//				// ...
+//				return nil
+//			}
+//		}
+package yaml
+
+var Unmarshal func(in []byte, out interface{}) error
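For illustration only, a test package built with -tags testify_yaml_custom could plug in any parser whose Unmarshal has the required signature; gopkg.in/yaml.v2 is used here purely as an example:

	//go:build testify_yaml_custom

	package mypkg_test

	import (
		assertYaml "github.com/stretchr/testify/assert/yaml"
		yamlv2 "gopkg.in/yaml.v2"
	)

	func init() {
		assertYaml.Unmarshal = yamlv2.Unmarshal // same signature: func([]byte, interface{}) error
	}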
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
new file mode 100644
index 0000000..0bae80e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
@@ -0,0 +1,36 @@
+//go:build !testify_yaml_fail && !testify_yaml_custom
+
+// Package yaml is just an indirection to handle YAML deserialization.
+//
+// This package is just an indirection that allows the builder to override the
+// indirection with an alternative implementation of this package that uses
+// another implementation of YAML deserialization. This allows to not either not
+// use YAML deserialization at all, or to use another implementation than
+// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]).
+//
+// Alternative implementations are selected using build tags:
+//
+//   - testify_yaml_fail: [Unmarshal] always fails with an error
+//   - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it
+//     before calling any of [github.com/stretchr/testify/assert.YAMLEq] or
+//     [github.com/stretchr/testify/assert.YAMLEqf].
+//
+// Usage:
+//
+//	go test -tags testify_yaml_fail
+//
+// You can check with "go list" which implementation is linked:
+//
+//	go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//	go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//	go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//
+// [PR #1120]: https://github.com/stretchr/testify/pull/1120
+package yaml
+
+import goyaml "gopkg.in/yaml.v3"
+
+// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal].
+func Unmarshal(in []byte, out interface{}) error {
+	return goyaml.Unmarshal(in, out)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
new file mode 100644
index 0000000..8041803
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
@@ -0,0 +1,17 @@
+//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that always fail.
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3]:
+//
+//	go test -tags testify_yaml_fail
+package yaml
+
+import "errors"
+
+var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)")
+
+func Unmarshal([]byte, interface{}) error {
+	return errNotImplemented
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
index 956790e..964a404 100644
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -1,10 +1,16 @@
 Changes by Version
 ==================
 
-2.29.2 (unreleased)
+2.30.0 (2021-12-07)
 -------------------
-- Nothing yet.
-
+- Add deprecation notice -- Yuri Shkuro
+- Use public struct for tracer options to document initialization better (#605) -- Yuri Shkuro
+- Remove redundant newline in NewReporter init message (#603) -- wwade
+- [zipkin] Encode span IDs as full 16-hex strings #601 -- Nathan
+- [docs] Replace godoc.org with pkg.go.dev (#591) -- Aaron Jheng
+- Remove outdated reference to Zipkin model. -- Yuri Shkuro
+- Move thrift compilation to a script (#590) -- Aaron Jheng
+- Document JAEGER_TRACEID_128BIT env var -- Yuri Shkuro
 
 2.29.1 (2021-05-24)
 -------------------
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
index bb7463c..ee7b212 100644
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -21,9 +21,7 @@
 
 THRIFT_VER=0.14
 THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER)
-THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
-THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
-THRIFT_GEN_DIR=thrift-gen
+THRIFT=docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) thrift
 
 PASS=$(shell printf "\033[32mPASS\033[0m")
 FAIL=$(shell printf "\033[31mFAIL\033[0m")
@@ -105,19 +103,7 @@
 # TODO at the moment we're not generating tchan_*.go files
 .PHONY: thrift-compile
 thrift-compile: thrift-image
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
-	sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
-	sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
-	sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
-		$(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
-	rm -rf thrift-gen/*/*-remote
-	rm -rf crossdock/thrift/*/*-remote
-	rm -rf thrift-gen/jaeger/collector.go
+	docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) /data/scripts/gen-thrift.sh
 
 .PHONY: idl-submodule
 idl-submodule:
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
index 687f578..e23912b 100644
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -1,5 +1,9 @@
 [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
 
+# 🛑 This library is being deprecated!
+
+We urge all users to migrate to [OpenTelemetry](https://opentelemetry.io/). Please refer to the [notice in the documentation](https://www.jaegertracing.io/docs/latest/client-libraries/#deprecating-jaeger-clients) for details.
+
 # Jaeger Bindings for Go OpenTracing API
 
 Instrumentation library that implements an
@@ -15,6 +19,12 @@
 
 ## Installation
 
+### Preferred
+
+Add `github.com/uber/jaeger-client-go` to `go.mod`.
+
+### Old way
+
 We recommended using a dependency manager like [dep](https://golang.github.io/dep/)
 and [semantic versioning](http://semver.org/) when including this library into an application.
 For example, Jaeger backend imports this library like this:
@@ -39,15 +49,19 @@
 
 ## Initialization
 
-See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
+See tracer initialization examples in [godoc](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#pkg-examples)
 and [config/example_test.go](./config/example_test.go).
 
+There are two ways to create a tracer:
+  * Using [Configuration](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#Configuration) struct that allows declarative configuration. For example, you can populate that struct from a YAML/JSON config, or ask it to initialize itself using environment variables (see next section).
+  * Using [NewTracer()](https://pkg.go.dev/github.com/uber/jaeger-client-go#NewTracer) function that allows for full programmatic control of configuring the tracer using TracerOptions.
+
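As a minimal sketch of the declarative path above (error handling abbreviated; assumes the documented config API):

	import (
		"io"

		"github.com/opentracing/opentracing-go"
		jaegercfg "github.com/uber/jaeger-client-go/config"
	)

	func initTracer() (opentracing.Tracer, io.Closer, error) {
		cfg, err := jaegercfg.FromEnv() // picks up the JAEGER_* variables described below
		if err != nil {
			return nil, nil, err
		}
		return cfg.NewTracer()
	}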
 ### Environment variables
 
 The tracer can be initialized with values coming from environment variables, if it is
 [built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
 that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
-None of the env vars are required and all of them can be overridden via direct setting 
+None of the env vars are required and all of them can be overridden via direct setting
 of the property on the configuration object.
 
 Property| Description
@@ -70,6 +84,7 @@
 JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
 JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
 JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
+JAEGER_TRACEID_128BIT | Whether to enable 128bit trace-id generation, `true` or `false`. If not enabled, the SDK defaults to 64bit trace-ids.
 JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
 JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
 
@@ -194,7 +209,7 @@
 of the root span. It involves several features and architectural changes:
   * **Shared sampling state**: the sampling state is shared across all local
     (i.e. in-process) spans for a given trace.
-  * **New `SamplerV2` API** allows the sampler to be called at multiple points 
+  * **New `SamplerV2` API** allows the sampler to be called at multiple points
     in the life cycle of a span:
     * on span creation
     * on overwriting span operation name
@@ -311,8 +326,8 @@
 [Apache 2.0 License](LICENSE).
 
 
-[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
-[doc]: https://godoc.org/github.com/uber/jaeger-client-go
+[doc-img]: https://pkg.go.dev/badge/github.com/uber/jaeger-client-go.svg
+[doc]: https://pkg.go.dev/github.com/uber/jaeger-client-go
 [ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
 [ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
 [cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index c2222f1..0667635 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -420,7 +420,7 @@
 		jaeger.ReporterOptions.Logger(logger),
 		jaeger.ReporterOptions.Metrics(metrics))
 	if rc.LogSpans && logger != nil {
-		logger.Infof("Initializing logging reporter\n")
+		logger.Infof("Initializing logging reporter")
 		reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
 	}
 	return reporter, err
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index d8eb698..35710cf 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -22,7 +22,7 @@
 
 const (
 	// JaegerClientVersion is the version of the client library reported as Span tag.
-	JaegerClientVersion = "Go-2.29.1"
+	JaegerClientVersion = "Go-2.30.0"
 
 	// JaegerClientVersionTagKey is the name of the tag used to report client version.
 	JaegerClientVersionTagKey = "jaeger.version"
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
index 4f55490..fac3c09 100644
--- a/vendor/github.com/uber/jaeger-client-go/doc.go
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -14,8 +14,6 @@
 
 /*
 Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
-It is currently using Zipkin-compatible data model and can be directly
-itegrated with Zipkin backend (http://zipkin.io).
 
 For integration instructions please refer to the README:
 
diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
index 6fe0cd0..fba1e43 100644
--- a/vendor/github.com/uber/jaeger-client-go/span_allocator.go
+++ b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
@@ -16,7 +16,7 @@
 
 import "sync"
 
-// SpanAllocator abstraction of managign span allocations
+// SpanAllocator abstraction of managing span allocations
 type SpanAllocator interface {
 	Get() *Span
 	Put(*Span)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
index ac9bd48..ca25568 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
@@ -23,7 +23,7 @@
 	"context"
 )
 
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
+// See https://pkg.go.dev/context#WithValue for why we need the unexported typedefs.
 type (
 	headerKey     string
 	headerKeyList int
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
index d884c6a..02f0613 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
@@ -23,7 +23,7 @@
 	"context"
 )
 
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
+// See https://pkg.go.dev/context#WithValue for why we need the unexported typedefs.
 type responseHelperKey struct{}
 
 // TResponseHelper defines an object with a set of helper functions that can be
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
index f0734b7..16b4606 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -27,27 +27,30 @@
 // TracerOption is a function that sets some option on the tracer
 type TracerOption func(tracer *Tracer)
 
-// TracerOptions is a factory for all available TracerOption's
-var TracerOptions tracerOptions
+// TracerOptions is a factory for all available TracerOption's.
+var TracerOptions TracerOptionsFactory
 
-type tracerOptions struct{}
+// TracerOptionsFactory is a struct that defines functions for all available TracerOption's.
+type TracerOptionsFactory struct{}
 
 // Metrics creates a TracerOption that initializes Metrics on the tracer,
 // which is used to emit statistics.
-func (tracerOptions) Metrics(m *Metrics) TracerOption {
+func (TracerOptionsFactory) Metrics(m *Metrics) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.metrics = *m
 	}
 }
 
 // Logger creates a TracerOption that gives the tracer a Logger.
-func (tracerOptions) Logger(logger Logger) TracerOption {
+func (TracerOptionsFactory) Logger(logger Logger) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.logger = log.DebugLogAdapter(logger)
 	}
 }
 
-func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
+// CustomHeaderKeys allows overriding the default HTTP header keys used to propagate
+// tracing context.
+func (TracerOptionsFactory) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
 	return func(tracer *Tracer) {
 		if headerKeys == nil {
 			return
@@ -62,7 +65,7 @@
 
 // TimeNow creates a TracerOption that gives the tracer a function
 // used to generate timestamps for spans.
-func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
+func (TracerOptionsFactory) TimeNow(timeNow func() time.Time) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.timeNow = timeNow
 	}
@@ -70,7 +73,7 @@
 
 // RandomNumber creates a TracerOption that gives the tracer
 // a thread-safe random number generator function for generating trace IDs.
-func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
+func (TracerOptionsFactory) RandomNumber(randomNumber func() uint64) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.randomNumber = randomNumber
 	}
@@ -80,7 +83,7 @@
 // an object pool to minimize span allocations.
 // This should be used with care, only if the service is not running any async tasks
 // that can access parent spans after those spans have been finished.
-func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
+func (TracerOptionsFactory) PoolSpans(poolSpans bool) TracerOption {
 	return func(tracer *Tracer) {
 		if poolSpans {
 			tracer.spanAllocator = newSyncPollSpanAllocator()
@@ -90,56 +93,67 @@
 	}
 }
 
-// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
+// HostIPv4 creates a TracerOption that identifies the current service/process.
 // If not set, the factory method will obtain the current IP address.
 // The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
-func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
+//
+// Deprecated.
+func (TracerOptionsFactory) HostIPv4(hostIPv4 uint32) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.hostIPv4 = hostIPv4
 	}
 }
 
-func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
+// Injector registers an Injector for the given format.
+func (TracerOptionsFactory) Injector(format interface{}, injector Injector) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.injectors[format] = injector
 	}
 }
 
-func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
+// Extractor registers an Extractor for the given format.
+func (TracerOptionsFactory) Extractor(format interface{}, extractor Extractor) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.extractors[format] = extractor
 	}
 }
 
-func (t tracerOptions) Observer(observer Observer) TracerOption {
+// Observer registers an Observer.
+func (t TracerOptionsFactory) Observer(observer Observer) TracerOption {
 	return t.ContribObserver(&oldObserver{obs: observer})
 }
 
-func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
+// ContribObserver registers a ContribObserver.
+func (TracerOptionsFactory) ContribObserver(observer ContribObserver) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.observer.append(observer)
 	}
 }
 
-func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
+// Gen128Bit enables generation of 128bit trace IDs.
+func (TracerOptionsFactory) Gen128Bit(gen128Bit bool) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.gen128Bit = gen128Bit
 	}
 }
 
-func (tracerOptions) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
+// NoDebugFlagOnForcedSampling turns off setting the debug flag in the trace context
+// when the trace is force-started via sampling=1 span tag.
+func (TracerOptionsFactory) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
 	}
 }
 
-func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
+// HighTraceIDGenerator allows overriding the generator used for the high 64 bits of trace IDs.
+func (TracerOptionsFactory) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.highTraceIDGenerator = highTraceIDGenerator
 	}
 }
 
-func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
+// MaxTagValueLength sets the limit on the max length of tag values.
+func (TracerOptionsFactory) MaxTagValueLength(maxTagValueLength int) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.maxTagValueLength = maxTagValueLength
 	}
@@ -151,31 +165,37 @@
 //
 // About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
 // half are the newest logs.
-func (tracerOptions) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
+func (TracerOptionsFactory) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.maxLogsPerSpan = maxLogsPerSpan
 	}
 }
 
-func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+// ZipkinSharedRPCSpan enables a mode where server-side span shares the span ID
+// from the client span from the incoming request, for compatibility with Zipkin's
+// "one span per RPC" model.
+func (TracerOptionsFactory) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
 	}
 }
 
-func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+// Tag adds a tracer-level tag that will be added to all spans.
+func (TracerOptionsFactory) Tag(key string, value interface{}) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.tags = append(tracer.tags, Tag{key: key, value: value})
 	}
 }
 
-func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+// BaggageRestrictionManager registers BaggageRestrictionManager.
+func (TracerOptionsFactory) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.baggageRestrictionManager = mgr
 	}
 }
 
-func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
+// DebugThrottler registers a Throttler for debug spans.
+func (TracerOptionsFactory) DebugThrottler(throttler throttler.Throttler) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.debugThrottler = throttler
 	}